@inproceedings {INPROC-2024-10,
   author = {Dennis Treder-Tschechlov and Manuel Fritz and Holger Schwarz and Bernhard Mitschang},
   title = {{Ensemble Clustering based on Meta-Learning and Hyperparameter Optimization}},
   booktitle = {Proceedings of the VLDB Endowment, Volume 17, Issue 11},
   publisher = {VLDB Endowment},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {2880--2892},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {2024},
   doi = {10.14778/3681954.3681970},
   language = {Englisch},
   cr-category = {I.5.3 Pattern Recognition Clustering},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Efficient clustering algorithms, such as k-Means, are often used in practice because they scale well for large datasets. However, they are only able to detect simple data characteristics. Ensemble clustering can overcome this limitation by combining multiple results of efficient algorithms. However, analysts face several challenges when applying ensemble clustering, i. e., analysts struggle to (a) efficiently generate an ensemble and (b) combine the ensemble using a suitable consensus function with a corresponding hyperparameter setting. In this paper, we propose EffEns, an efficient ensemble clustering approach to address these challenges. Our approach relies on meta-learning to learn about dataset characteristics and the correlation between generated base clusterings and the performance of consensus functions. We apply the learned knowledge to generate appropriate ensembles and select a suitable consensus function to combine their results. Further, we use a state-of-the-art optimization technique to tune the hyperparameters of the selected consensus function. Our comprehensive evaluation on synthetic and real-world datasets demonstrates that EffEns significantly outperforms state-of-the-art approaches w.r.t. accuracy and runtime.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2024-10&engl=0}
}
@inproceedings {INPROC-2024-06,
   author = {Christoph Stach and Yunxuan Li and Laura Schuiki and Bernhard Mitschang},
   title = {{LALO—A Virtual Data Lake Zone for Composing Tailor-Made Data Products on Demand}},
   booktitle = {Proceedings of the 35th International Conference on Database and Expert Systems Applications (DEXA 2024)},
   editor = {Christine Strauss and Toshiyuki Amagasa and Giuseppe Manco and Gabriele Kotsis and A Min Tjoa and Ismail Khalil},
   address = {Cham},
   publisher = {Springer},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Lecture Notes in Computer Science},
   volume = {14911},
   pages = {288--305},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {2024},
   isbn = {978-3-031-68311-4},
   issn = {0302-9743},
   doi = {10.1007/978-3-031-68312-1_22},
   keywords = {Data Product; Virtual Data Lake Zone; Data Stream Adaptation},
   language = {Englisch},
   cr-category = {H.2.7 Database Administration,     E.2 Data Storage Representations,     H.3.3 Information Search and Retrieval,     H.2.8 Database Applications},
   contact = {Senden Sie eine E-Mail an \<christoph.stach@ipvs.uni-stuttgart.de\>.},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The emerging paradigm of data products, which has become increasingly popular recently due to the rise of data meshes and data marketplaces, also poses unprecedented challenges for data management. Current data architectures, namely data warehouses and data lakes, are not able to meet these challenges adequately. In particular, these architectures are not designed for a just-in-time provision of highly customized data products tailored perfectly to the needs of customers. In this paper, we therefore present a virtual data lake zone for composing tailor-made data products on demand, called LALO. LALO uses data streaming technologies to enable just-in-time composing of data products without allocating storage space in the data architecture permanently. In order to enable customers to tailor data products to their needs, LALO uses a novel mechanism that enables live adaptation of data streams. Evaluation results show that the overhead for such an adaptation is negligible. Therefore, LALO represents an efficient solution for the appropriate handling of data products, both in terms of storage space and runtime.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2024-06&engl=0}
}
@inproceedings {INPROC-2024-04,
   author = {Jan Schneider and Arnold Lutsch and Christoph Gr{\"o}ger and Holger Schwarz and Bernhard Mitschang},
   title = {{First Experiences on the Application of Lakehouses in Industrial Practice}},
   booktitle = {Proceedings of the 35th GI-Workshop on Foundations of Databases (Grundlagen von Datenbanken), Herdecke, Germany},
   editor = {Uta St{\"o}rl},
   publisher = {CEUR Workshop Proceedings},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {CEUR Workshop Proceedings},
   volume = {3710},
   pages = {3--8},
   type = {Workshop-Beitrag},
   month = {Juni},
   year = {2024},
   issn = {1613-0073},
   keywords = {Data Lakehouse; Data Platform; Platform Architecture; Data Analytics; Case Study; Industry Experience},
   language = {Englisch},
   cr-category = {H.3.4 Information Storage and Retrieval Systems and Software,     H.4.2 Information Systems Applications Types of Systems},
   ee = {https://ceur-ws.org/Vol-3710/paper1.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {In recent years, so-called lakehouses have emerged as a new type of data platform that intends to combine characteristics of data warehouses and data lakes. Although companies started to employ the associated concepts and technologies as part of their analytics architectures, little is known about their practical medium- and long-term experiences as well as proven architectural decisions. Additionally, there is only limited knowledge about how lakehouses can be utilized effectively in an industrial context. Hence, it remains unclear under which circumstances lakehouses represent a viable alternative to conventional data platforms. To address this gap, we conducted a case study on a real-world industrial case, in which manufacturing data needs to be managed and analytically exploited. Within the scope of this case, a dedicated analytics department has been testing and leveraging a lakehouse approach for several months in a productive environment with high data volumes and various types of analytical workloads. The paper at hand presents the results of our within-case analyses and focuses on the industrial setting of the case as well as the architecture of the utilized lakehouse. This way, it provides preliminary insights on the application of lakehouses in industrial practice and refers to useful architectural decisions.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2024-04&engl=0}
}
@inproceedings {INPROC-2024-03,
   author = {Andrea Fieschi and Pascal Hirmer and Sachin Agrawal and Christoph Stach and Bernhard Mitschang},
   title = {{HySAAD - A Hybrid Selection Approach for Anonymization by Design in the Automotive Domain}},
   booktitle = {Proceedings of the 25th IEEE International Conference on Mobile Data Management (MDM 2024)},
   editor = {Chiara Renso and Mahmoud Sakr and Walid G Aref and Ashley Song and Cheng Long},
   address = {Los Alamitos, Washington, Tokyo},
   publisher = {IEEE Computer Society Conference Publishing Services},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {203--210},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2024},
   isbn = {979-8-3503-7455-1},
   issn = {2375-0324},
   doi = {10.1109/MDM61037.2024.00044},
   keywords = {anonymization; connected vehicles; privacy protection; metrics},
   language = {Englisch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues},
   contact = {Senden Sie eine E-Mail an \<andrea.fieschi@ipvs.uni-stuttgart.de\>.},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The increasing connectivity and data exchange between vehicles and the cloud have led to growing privacy concerns. To keep on gaining product insights through data collection while guaranteeing privacy protection, an anonymization-by-design approach should be used. A rising number of anonymization methods, not limited to the automotive domain, can be found in the literature and practice. Developers need support to select a suitable anonymization technique. To this end, we make the following two contributions: 1) We apply our knowledge from the automotive domain to outline the usage of qualitative metrics for the assessment of anonymization techniques; 2) We introduce HySAAD, a hybrid selection approach for anonymization by design that leverages this groundwork by recommending appropriate anonymization techniques for each mobile data analytics use case based on both qualitative (i.e., ``soft'') and quantitative (i.e., ``hard'') metrics. Using a real-world use case from the automotive domain, we demonstrate the applicability and effectiveness of HySAAD.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2024-03&engl=0}
}
@inproceedings {INPROC-2024-02,
   author = {Yunxuan Li and Christoph Stach and Bernhard Mitschang},
   title = {{PaDS: An adaptive and privacy-enabling Data Pipeline for Smart Cars}},
   booktitle = {Proceedings of the 25th IEEE International Conference on Mobile Data Management (MDM 2024)},
   editor = {Chiara Renso and Mahmoud Sakr and Walid G Aref and Kyoung-Sook Kim and Manos Papagelis and Dimitris Sacharidis},
   address = {Los Alamitos, Washington, Tokyo},
   publisher = {IEEE Computer Society Conference Publishing Services},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {41--50},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2024},
   isbn = {979-8-3503-7455-1},
   issn = {2375-0324},
   doi = {10.1109/MDM61037.2024.00026},
   keywords = {smart car; privacy-enabling data pipeline; datastream runtime adaptation; mobile data privacy management},
   language = {Englisch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues},
   contact = {Senden Sie eine E-Mail an \<yunxuan.li@ipvs.uni-stuttgart.de\>.},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The extensive use of onboard sensors in smart cars enables the collection, processing, and dissemination of large amounts of mobile data containing information about the vehicle, its driver, and even bystanders. Despite the undoubted benefits of such smart cars, this leads to significant privacy concerns. Due to their inherent mobility, the situation of smart cars changes frequently, and with it, the appropriate measures to counteract the exposure of private data. However, data management in such vehicles lacks sufficient support for this privacy dynamism. We therefore introduce PaDS, a framework for Privacy adaptive Data Stream. The focus of this paper is to enable adaptive data processing within the vehicle data stream. With PaDS, Privacy-Enhancing Technologies can be deployed dynamically in the data pipeline of a smart car according to the current situation without user intervention. With a comparison of state-of-the-art approaches, we demonstrate that our solution is very efficient as it does not require a complete restart of the data pipeline. Moreover, compared to a static approach, PaDS causes only minimal overhead despite its dynamic adaptation of the data pipeline to react to changing privacy requirements. This renders PaDS an effective privacy solution for smart cars.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2024-02&engl=0}
}
@inproceedings {INPROC-2024-01,
   author = {Dennis Przytarski and Christoph Stach and Bernhard Mitschang},
   title = {{Assessing Data Layouts to Bring Storage Engine Functionality to Blockchain Technology}},
   booktitle = {Proceedings of the 57th Hawaii International Conference on System Sciences (HICSS '24)},
   editor = {Tung X. Bui},
   publisher = {ScholarSpace},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {5091--5100},
   type = {Konferenz-Beitrag},
   month = {Januar},
   year = {2024},
   isbn = {978-0-9981331-7-1},
   keywords = {blockchain; storage engine; queries},
   language = {Englisch},
   cr-category = {H.3.1 Content Analysis and Indexing,     H.3.2 Information Storage,     H.3.3 Information Search and Retrieval},
   ee = {https://hdl.handle.net/10125/106995},
   contact = {Senden Sie eine E-Mail an \<Christoph.Stach@ipvs.uni-stuttgart.de\>.},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Nowadays, modern applications often use blockchains as a secure data store. However, querying blockchain data is more challenging than querying conventional databases due to blockchains being primarily designed for the logging of asset transfers, such as cryptocurrencies, rather than storing and reading generic data. To improve the experience of querying blockchain data and make it comparable to querying conventional databases, new design approaches of the storage engine for blockchain technology are required. An important aspect is the data layout of a block, as it plays a crucial role in facilitating reading of blockchain data. In this paper, we identify a suitable data layout that provides the required query capabilities while preserving the key properties of blockchain technology. Our goal is to overcome the limitations of current data access models in blockchains, such as the reliance on auxiliary data storages and error-prone smart contracts. To this end, we compare four promising data layouts with data models derived from document, row, column, and triple stores in terms of schema flexibility, read pattern generality, and relational algebra suitability. We then assess the most suitable data layout for blockchain technology.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2024-01&engl=0}
}
@inproceedings {INPROC-2023-07,
   author = {Andrea Fieschi and Yunxuan Li and Pascal Hirmer and Christoph Stach and Bernhard Mitschang},
   title = {{Privacy in Connected Vehicles: Perspectives of Drivers and Car Manufacturers}},
   booktitle = {Service-Oriented Computing: 17th Symposium and Summer School, SummerSOC 2023, Heraklion, Crete, Greece, June 25 – July 1, 2023, Revised Selected Papers},
   editor = {Marco Aiello and Johanna Barzen and Schahram Dustdar and Frank Leymann},
   address = {Cham},
   publisher = {Springer Nature Switzerland},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Communications in Computer and Information Science},
   volume = {1847},
   pages = {59--68},
   type = {Konferenz-Beitrag},
   month = {Oktober},
   year = {2023},
   isbn = {978-3-031-45727-2},
   doi = {10.1007/978-3-031-45728-9_4},
   keywords = {Connected Vehicles; Privacy; Anonymization},
   language = {Englisch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues},
   contact = {Senden Sie eine E-Mail an Andrea Fieschi (Andrea.Fieschi@ipvs.uni-stuttgart.de) oder Yunxuan Li (Yunxuan.Li@ipvs.uni-stuttgart.de).},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The digital revolution has led to significant technological advancements in the automotive industry, enabling vehicles to process and share information with other vehicles and the cloud. However, as data sharing becomes more prevalent, privacy protection has become an essential issue. In this paper, we explore various privacy challenges regarding different perspectives of drivers and car manufacturers. We also propose general approaches to overcome these challenges with respect to their individual needs. Finally, we highlight the importance of collaboration between drivers and car manufacturers to establish trust and achieve better privacy protection.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2023-07&engl=0}
}
@inproceedings {INPROC-2023-06,
   author = {Jan Schneider and Christoph Gr{\"o}ger and Arnold Lutsch and Holger Schwarz and Bernhard Mitschang},
   title = {{Assessing the Lakehouse: Analysis, Requirements and Definition}},
   booktitle = {Proceedings of the 25th International Conference on Enterprise Information Systems, ICEIS 2023, Volume 1, Prague, Czech Republic, April 24-26, 2023},
   editor = {Joaquim Filipe and Michal Smialek and Alexander Brodsky and Slimane Hammoudi},
   address = {Prague},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {44--56},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2023},
   isbn = {978-989-758-648-4},
   issn = {2184-4992},
   doi = {10.5220/0011840500003467},
   keywords = {Lakehouse; Data Warehouse; Data Lake; Data Management; Data Analytics},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems,     H.2.7 Database Administration,     H.2.8 Database Applications},
   ee = {https://www.scitepress.org/PublicationsDetail.aspx?ID=9ydI3Lyl2Fk=,     https://doi.org/10.5220/0011840500003467},
   contact = {jan.schneider@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The digital transformation opens new opportunities for enterprises to optimize their business processes by applying data-driven analysis techniques. For storing and organizing the required huge amounts of data, different types of data platforms have been employed in the past, with data warehouses and data lakes being the most prominent ones. Since they possess rather contrary characteristics and address different types of analytics, companies typically utilize both of them, leading to complex architectures with replicated data and slow analytical processes. To counter these issues, vendors have recently been making efforts to break the boundaries and to combine features of both worlds into integrated data platforms. Such systems are commonly called lakehouses and promise to simplify enterprise analytics architectures by serving all kinds of analytical workloads from a single platform. However, it remains unclear how lakehouses can be characterized, since existing definitions focus almost arbitrarily on individual architectural or functional aspects and are often driven by marketing. In this paper, we assess prevalent definitions for lakehouses and finally propose a new definition, from which several technical requirements for lakehouses are derived. We apply these requirements to several popular data management tools, such as Delta Lake, Snowflake and Dremio in order to evaluate whether they enable the construction of lakehouses.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2023-06&engl=0}
}
@inproceedings {INPROC-2023-05,
   author = {Thomas Ackermann and Robert Miehe and Peter Reimann and Bernhard Mitschang and Ralf Takors and Thomas Bauernhansl},
   title = {{A Cross-Disciplinary Training Concept for Future Technologists in the Dawn of Biointelligent Production Systems}},
   booktitle = {Procedia CIRP: Proceedings of 13th CIRP Conference on Learning Factories (CIRP CLF)},
   publisher = {Elsevier BV},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2023},
   keywords = {Biointelligent systems; Biological transformation; Converging technologies; Qualification},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Biologicalization is considered one of the most important transformation processes in industry alongside digitalization. This work presents a qualification concept within the Stuttgart Biointelligent Manufacturing Framework (BioMEFUS), which is intended to provide skills and experiences at the intersections between manufacturing and process engineering, computer science and life science. Life cycle management, production methods and engineering of components towards the development and implementation of biointelligent systems are considered as the major engineering platforms of the framework. The qualification concept is developed for early stage researchers (ESRs) at the doctorate stage. It provides a mapping of individual research projects in the field of biointelligent production systems and contains subject-related and methodological building blocks for the formation of future experts and decision-makers in the course of biological transformation.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2023-05&engl=0}
}
@inproceedings {INPROC-2023-04,
   author = {Julius Voggesberger and Peter Reimann and Bernhard Mitschang},
   title = {{Towards the Automatic Creation of Optimized Classifier Ensembles}},
   booktitle = {Proceedings of the 25th International Conference on Enterprise Information Systems (ICEIS 2023)},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {614--621},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2023},
   keywords = {Classifier Ensembles; Classifier Diversity; Decision Fusion; AutoML; Machine Learning},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Classifier ensemble algorithms allow for the creation of combined machine learning models that are more accurate and generalizable than individual classifiers. However, creating such an ensemble is complex, as several requirements must be fulfilled. An expert has to select multiple classifiers that are both accurate and diverse. In addition, a decision fusion algorithm must be selected to combine the predictions of these classifiers into a consensus decision. Satisfying these requirements is challenging even for experts, as it requires a lot of time and knowledge. In this position paper, we propose to automate the creation of classifier ensembles. While there already exist several frameworks that automatically create multiple classifiers, none of them meet all requirements to build optimized ensembles based on these individual classifiers. Hence, we introduce and compare three basic approaches that tackle this challenge. Based on the comparison results, we propose one of the approaches that best meets the requirements to lay the foundation for future work.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2023-04&engl=0}
}
@inproceedings {INPROC-2023-03,
   author = {Yannick Wilhelm and Peter Reimann and Wolfgang Gauchel and Steffen Klein and Bernhard Mitschang},
   title = {{PUSION - A Generic and Automated Framework for Decision Fusion}},
   booktitle = {Proceedings of the 39th IEEE International Conference on Data Engineering (ICDE 2023)},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2023},
   keywords = {Classifier ensembles; decision fusion; automated decision fusion; hybrid fault diagnosis},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Combining two or more classifiers into an ensemble and fusing the individual classifier decisions to a consensus decision can improve the accuracy for a classification problem. The classification improvement of the fusion result depends on numerous factors, such as the data set, the combination scenario, the decision fusion algorithm, as well as the prediction accuracies and diversity of the multiple classifiers to be combined. Due to these factors, the best decision fusion algorithm for a given decision fusion problem cannot be generally determined in advance. In order to support the user in combining classifiers and to achieve the best possible fusion result, we propose the PUSION (Python Universal fuSION) framework, a novel generic and automated framework for decision fusion of classifiers. The framework includes 14 decision fusion algorithms and covers a total of eight different combination scenarios for both multi-class and multi-label classification problems. The introduced concept of AutoFusion detects the combination scenario for a given use case, automatically selects the applicable decision fusion algorithms and returns the decision fusion algorithm that leads to the best fusion result. The framework is evaluated with two real-world case studies in the field of fault diagnosis. In both case studies, the consensus decision of multiple classifiers and heterogeneous fault diagnosis methods significantly increased the overall classification accuracy. Our evaluation results show that our framework is of practical relevance and reliably finds the best performing decision fusion algorithm for a given combination task.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2023-03&engl=0}
}
@inproceedings {INPROC-2023-02,
   author = {Dennis Treder-Tschechlov and Peter Reimann and Holger Schwarz and Bernhard Mitschang},
   title = {{Approach to Synthetic Data Generation for Imbalanced Multi-class Problems with Heterogeneous Groups}},
   booktitle = {Tagungsband der 20. Fachtagung Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2023)},
   publisher = {GI Gesellschaft f{\"u}r Informatik e.V. (GI)},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Lecture Notes in Informatics (LNI)},
   pages = {329--351},
   type = {Konferenz-Beitrag},
   month = {M{\"a}rz},
   year = {2023},
   keywords = {Machine learning; classification; data generation; real-world data characteristics},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   ee = {https://dl.gi.de/bitstream/handle/20.500.12116/40320/B3-5.pdf?},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {To benchmark novel classification algorithms, these algorithms should be evaluated on data with characteristics that also appear in real-world use cases. Important data characteristics that often lead to challenges for classification approaches are multi-class imbalance and heterogeneous groups. Heterogeneous groups are sets of real-world entities, where the classification patterns may vary among different groups and where the groups are typically imbalanced in the data. Real-world data that comprise these characteristics are usually not publicly available, e.g., because they constitute sensitive patient information or due to privacy concerns. Further, the manifestations of the characteristics cannot be controlled specifically on real-world data. A more rigorous approach is to synthetically generate data such that different manifestations of the characteristics can be controlled as well. However, existing data generators are not able to generate data that feature both data characteristics, i.e., multi-class imbalance and heterogeneous groups. In this paper, we propose an approach that fills this gap as it allows to synthetically generate data that exhibit both characteristics. We make use of a taxonomy model that organizes real-world entities in domain-specific heterogeneous groups to generate data reflecting the characteristics of these groups. Further, we incorporate probability distributions to reflect the imbalances of multiple classes and groups from real-world use cases. The evaluation shows that our approach can generate data that feature the data characteristics multi-class imbalance and heterogeneous groups and that it allows to control different manifestations of these characteristics.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2023-02&engl=0}
}
@inproceedings {INPROC-2022-08,
   author = {Rebecca Eichler and Christoph Gr{\"o}ger and Eva Hoos and Christoph Stach and Holger Schwarz and Bernhard Mitschang},
   title = {{Establishing the Enterprise Data Marketplace: Characteristics, Architecture, and Challenges}},
   booktitle = {Proceedings of the Workshop on Data Science for Data Marketplaces in Conjunction with the 48th International Conference on Very Large Data Bases},
   editor = {Xiaohui Yu and Jian Pei},
   publisher = {-},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--12},
   type = {Workshop-Beitrag},
   month = {September},
   year = {2022},
   language = {Englisch},
   cr-category = {E.m Data Miscellaneous,     H.3.7 Digital Libraries,     H.4.m Information Systems Applications Miscellaneous},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Companies today have increasing amounts of data at their disposal, most of which is not used, leaving the data value unexploited. In order to leverage the data value, the data must be democratized, i.e., made available to the company employees. In this context, the use of enterprise data marketplaces, platforms for trading data within a company, are proposed. However, specifics of enterprise data marketplaces and how these can be implemented have not been investigated in literature so far. To shed light on these topics, we illustrate the characteristics of an enterprise data marketplace and highlight according marketplace requirements. We provide an enterprise data marketplace architecture, discuss how it integrates into a company's system landscape and present an enterprise data marketplace prototype. Finally, we examine organizational and technical challenges which arise when operating a marketplace in the enterprise context. In this paper, we thereby present the enterprise data marketplace as a distinct marketplace type and provide the basis for establishing it within a company.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2022-08&engl=0}
}
@inproceedings {INPROC-2022-07,
   author = {Yunxuan Li and Pascal Hirmer and Christoph Stach and Bernhard Mitschang},
   title = {{Ensuring Situation-Aware Privacy for Connected Vehicles}},
   booktitle = {Proceedings of the 12th International Conference on the Internet of Things (IoT); Delft, Netherlands, November 7 - 10, 2022},
   editor = {Evangelos Niforatos and Gerd Kortuem and Nirvana Meratnia and Josh Siegel and Florian Michahelles},
   address = {New York, NY, USA},
   publisher = {ACM},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {135--138},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2022},
   isbn = {978-1-4503-9665-3},
   doi = {10.1145/3567445.3569163},
   keywords = {Connected Vehicle; Situation-Awareness; Privacy-Preserving},
   language = {Englisch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues,     K.6.5 Security and Protection},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {As technology advances in new sensors and software, modern vehicles become increasingly intelligent. To date, connected vehicles can collect, process, and share data with other entities in connected vehicle environments. However, in terms of data collection and exchange, privacy becomes a central issue. It is challenging to preserve privacy in connected vehicle environments when the privacy demands of drivers could change from situation to situation even for the same service. In this paper, we analyze the requirements for a privacy-preserving system in connected vehicle environments with a focus on situation-awareness and safety aspects. Based on the analysis, we propose a novel situation-aware privacy-preserving framework for connected vehicles. Our framework supports individual privacy protections for specific end-point services and situation-aware privacy protections for different circumstances.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2022-07&engl=0}
}
@inproceedings {INPROC-2022-06,
   author = {Julian Ziegler and Peter Reimann and Christoph Schulz and Florian Keller and Bernhard Mitschang},
   title = {{A Graph Structure to Discover Patterns in Unstructured Processes of Product Development}},
   booktitle = {Proceedings of the 23rd International Conference on Information Reuse and Integration for Data Science (IRI 2022)},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {2022},
   keywords = {Process Discovery; Unstructured Processes; Process Patterns; Graph Data; Frequent Subgraph Mining},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {A well-known information reuse approach is to exploit event logs for process discovery and optimization. However, process discovery is rarely used for product development projects. This is because information systems in product development, e. g., Product-Lifecycle-Management (PLM) systems, do not provide the event logs required by process discovery algorithms. Additionally, existing algorithms struggle with development projects, as these are unstructured and rich in variety. In this paper, we propose a novel approach to process discovery in order to make it applicable and tailored to product development projects. Instead of using flat event logs, we provide a graph-based data structure that is able to represent both activities and data of product development projects with the dataflow between activities. Based on this structure, we can leverage provenance available in PLM systems. Furthermore, we may use frequent subgraph mining to discover process patterns. Such patterns are well suited to describe different variants and common sub-processes of unstructured processes. Using a prototype, we evaluate this approach and successfully discover prevailing patterns. These patterns may be used by engineers to support their decision-making or help improve the execution of development projects.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2022-06&engl=0}
}
@inproceedings {INPROC-2022-05,
   author = {Rebecca Eichler and Christoph Gr{\"o}ger and Eva Hoos and Holger Schwarz and Bernhard Mitschang},
   title = {{Data Shopping — How an Enterprise Data Marketplace Supports Data Democratization in Companies}},
   booktitle = {Proceedings of the 34th International Conference on Intelligent Information Systems},
   editor = {Jochen De Weerdt and Artem Polyvyanyy},
   address = {Stuttgart},
   publisher = {Springer International Publishing},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Lecture Notes in Business Information Processing},
   pages = {19--26},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2022},
   doi = {10.1007/978-3-031-07481-3_3},
   keywords = {Data Marketplace; Data Sharing; Data Democratization},
   language = {Englisch},
   cr-category = {H.0 Information Systems General},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {To exploit the company's data value, employees must be able to find, understand and access it. The process of making corporate data available to the majority of the company's employees is referred to as data democratization. In this work, we present the current state and challenges of data democratization in companies, derived from a comprehensive literature study and expert interviews we conducted with a manufacturer. In this context a data consumer's journey is presented that reflects the required steps, tool types and roles for finding, understanding and accessing data in addition to revealing three data democratization challenges. To address these challenges we propose the use of an enterprise data marketplace, a novel type of information system for sharing data within the company. We developed a prototype based on which a suitability assessment of a data marketplace yields an improved consumer journey and demonstrates that the marketplace addresses the data democratization challenges and consequently, shows that the marketplace is suited for realizing data democratization.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2022-05&engl=0}
}
@inproceedings {INPROC-2022-03,
   author = {Marco Spie{\ss} and Peter Reimann and Christian Weber and Bernhard Mitschang},
   title = {{Analysis of Incremental Learning and Windowing to Handle Combined Dataset Shifts on Binary Classification for Product Failure Prediction}},
   booktitle = {Proceedings of the 24th International Conference on Enterprise Information Systems (ICEIS 2022)},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2022},
   keywords = {Binary Classification; Dataset Shift; Incremental Learning; Product Failure Prediction; Windowing.},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Dataset Shifts (DSS) are known to cause poor predictive performance in supervised machine learning tasks. We present a challenging binary classification task for a real-world use case of product failure prediction. The target is to predict whether a product, e. g., a truck may fail during the warranty period. However, building a satisfactory classifier is difficult, because the characteristics of underlying training data entail two kinds of DSS. First, the distribution of product configurations may change over time, leading to a covariate shift. Second, products gradually fail at different points in time, so that the labels in training data may change, which may cause a concept shift. Further, both DSS show a trade-off relationship, i. e., addressing one of them may imply negative impacts on the other one. We discuss the results of an experimental study to investigate how different approaches to addressing DSS perform when they are faced with both a covariate and a concept shift. Thereby, we prove that existing approaches, e. g., incremental learning and windowing, especially suffer from the trade-off between both DSS. Nevertheless, we come up with a solution for a data-driven classifier that yields better results than a baseline solution that does not address DSS.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2022-03&engl=0}
}
@inproceedings {INPROC-2022-01,
   author = {Christoph Stach and Cl{\'e}mentine Gritti and Dennis Przytarski and Bernhard Mitschang},
   title = {{Can Blockchains and Data Privacy Laws be Reconciled? A Fundamental Study of How Privacy-Aware Blockchains are Feasible}},
   booktitle = {Proceedings of the 37th ACM/SIGAPP Symposium On Applied Computing},
   editor = {Jiman Hong and Miroslav Bures and Ronald Petrlic and Christoph Sorge},
   address = {Brno},
   publisher = {ACM},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--10},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2022},
   isbn = {978-1-4503-8713-2},
   doi = {10.1145/3477314.3506986},
   keywords = {blockchains; immutable; tamper-proof; GDPR; privacy assessment},
   language = {Englisch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues,     K.6.5 Security and Protection},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de.},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Due to the advancing digitalization, the importance of data is constantly increasing. Application domains such as smart cars, smart cities, or smart healthcare rely on the permanent availability of large amounts of data to all parties involved. As a result, the value of data increases, making it a lucrative target for cyber-attacks. Particularly when human lives depend on the data, additional protection measures are therefore important for data management and provision. Blockchains, i.e., decentralized, immutable, and tamper-proof data stores, are becoming increasingly popular for this purpose. Yet, from a data protection perspective, the immutable and tamper-proof properties of blockchains pose a privacy concern. In this paper, we therefore investigate whether blockchains are in compliance with the General Data Protection Regulation (GDPR) if personal data are involved. To this end, we elaborate which articles of the GDPR are relevant in this regard and present technical solutions for those legal requirements with which blockchains are in conflict. We further identify open research questions that need to be addressed in order to achieve a privacy-by-design blockchain system.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2022-01&engl=0}
}
@inproceedings {INPROC-2021-11,
   author = {Christoph Stach and Julia Br{\"a}cker and Rebecca Eichler and Corinna Giebler and Bernhard Mitschang},
   title = {{Demand-Driven Data Provisioning in Data Lakes: BARENTS - A Tailorable Data Preparation Zone}},
   booktitle = {Proceedings of the 23rd International Conference on Information Integration and Web-based Applications \& Services (iiWAS2021); Linz, Austria, November 29-December 1, 2021},
   editor = {Maria Indrawan-Santiago and Eric Pardede and Ivan Luiz Salvadori and Matthias Steinbauer and Ismail Khalil and Gabriele Kotsis},
   address = {New York, NY, United States},
   publisher = {Association for Computing Machinery (ACM)},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--12},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2021},
   isbn = {978-1-4503-9556-4/21/11},
   doi = {10.1145/3487664.3487784},
   keywords = {data pre-processing; data transformation; knowledge modeling; ontology; data management; Data Lakes; zone model; food analysis},
   language = {Englisch},
   cr-category = {H.2.7 Database Administration,     E.2 Data Storage Representations,     H.3.3 Information Search and Retrieval,     H.2.8 Database Applications},
   contact = {Senden Sie eine E-Mail an christoph.stach@ipvs.uni-stuttgart.de.},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Data has never been as significant as it is today. It can be acquired virtually at will on any subject. Yet, this poses new challenges towards data management, especially in terms of storage (data is not consumed during processing, i.e., the data volume keeps growing), flexibility (new applications emerge), and operability (analysts are no IT experts). The goal has to be a demand-driven data provisioning, i.e., the right data must be available in the right form at the right time. Therefore, we introduce a tailorable data preparation zone for Data Lakes called BARENTS. It enables users to model in an ontology how to derive information from data and assign the information to use cases. The data is automatically processed based on this model and the refined data is made available to the appropriate use cases. Here, we focus on a resource-efficient data management strategy. BARENTS can be embedded seamlessly into established Big Data infrastructures, e.g., Data Lakes.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2021-11&engl=0}
}
@inproceedings {INPROC-2021-10,
   author = {Alejandro Villanueva Zacarias and Christian Weber and Peter Reimann and Bernhard Mitschang},
   title = {{AssistML: A Concept to Recommend ML Solutions for Predictive Use Cases}},
   booktitle = {Proceedings of the 8th IEEE International Conference on Data Science and Advanced Analytics (DSAA 2021)},
   address = {Porto, Portugal},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   type = {Konferenz-Beitrag},
   month = {Oktober},
   year = {2021},
   keywords = {Recommender Systems; Machine Learning; Meta Learning},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The adoption of machine learning (ML) in organizations is characterized by the use of multiple ML software components. Citizen data scientists face practical requirements when building ML systems, which go beyond the known challenges of ML, e. g., data engineering or parameter optimization. They are expected to quickly identify ML system options that strike a suitable trade-off across multiple performance criteria. These options also need to be understandable for non-technical users. Addressing these practical requirements represents a problem for citizen data scientists with limited ML experience. This calls for a method to help them identify suitable ML software combinations. Related work, e. g., AutoML systems, are not responsive enough or cannot balance different performance criteria. In this paper, we introduce AssistML, a novel concept to recommend ML solutions, i. e., software systems with ML models, for predictive use cases. AssistML uses metadata of existing ML solutions to quickly identify and explain options for a new use case. We implement the approach and evaluate it with two exemplary use cases. Results show that AssistML proposes ML solutions that are in line with users' performance preferences in seconds.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2021-10&engl=0}
}
@inproceedings {INPROC-2021-07,
   author = {Julian Ziegler and Peter Reimann and Florian Keller and Bernhard Mitschang},
   title = {{A Metadata Model to Connect Isolated Data Silos and Activities of the CAE Domain}},
   booktitle = {Proceedings of the 33rd International Conference on Advanced Information Systems Engineering (CAiSE)},
   publisher = {Springer International Publishing},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {213--228},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2021},
   keywords = {Metadata Models; Graphs; Computer-aided Engineering},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Computer-aided engineering (CAE) applications support the digital transformation of the manufacturing industry. They facilitate virtual product development and product testing via computer simulations. CAE applications generate vast quantities of heterogeneous data. Domain experts struggle to access and analyze them, because such engineering data are not sufficiently described with metadata. In this paper, we characterize the CAE domain and identify unsolved challenges for a tailored data and metadata management. For instance, work activities in product development projects and their relationships to data are not represented explicitly in current metadata models. We propose a metadata model that addresses all challenges and provides a connected view on all CAE data, metadata, and work activities of development projects. We validate the feasibility of our metadata model through a prototypical implementation and its application to a real-world use case. This verifies that our metadata model addresses the CAE-specific challenges and this way eases the task of domain experts to exploit relevant data.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2021-07&engl=0}
}
@inproceedings {INPROC-2021-06,
   author = {Rebecca Eichler and Corinna Giebler and Christoph Gr{\"o}ger and Eva Hoos and Holger Schwarz and Bernhard Mitschang},
   title = {{Enterprise-Wide Metadata Management - An Industry Case on the Current State and Challenges}},
   booktitle = {24th International Conference on Business Information Systems},
   editor = {Witold Abramowicz and S{\"o}ren Auer and El{\.z}bieta Lewa{\'n}ska},
   publisher = {TIB Open Publishing},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {269--279},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2021},
   doi = {https://doi.org/10.52825/bis.v1i.47},
   language = {Englisch},
   cr-category = {A.0 General Literature, General},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Metadata management is a crucial success factor for companies today, as for example, it enables exploiting data value fully or enables legal compliance. With the emergence of new concepts, such as the data lake, and new objectives, such as the enterprise-wide sharing of data, metadata management has evolved and now poses a renewed challenge for companies. In this context, we interviewed a globally active manufacturer to reveal how metadata management is implemented in practice today and what challenges companies are faced with and whether these constitute research gaps. As an outcome, we present the company's metadata management goals and their corresponding solution approaches and challenges. An evaluation of the challenges through a literature and tool review yields three research gaps, which are concerned with the topics: (1) metadata management for data lakes, (2) categorizations and compositions of metadata management tools for comprehensive metadata management, and (3) the use of data marketplaces as metadata-driven exchange platforms within an enterprise. The gaps lay the groundwork for further research activities in the field of metadata management and the industry case represents a starting point for research to realign with real-world industry needs.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2021-06&engl=0}
}
@inproceedings {INPROC-2021-05,
   author = {Corinna Giebler and Christoph Gr{\"o}ger and Eva Hoos and Rebecca Eichler and Holger Schwarz and Bernhard Mitschang},
   title = {{The Data Lake Architecture Framework}},
   booktitle = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2021), 19. Fachtagung des GI-Fachbereichs Datenbanken und Informationssysteme (DBIS), 13.-17. September 2021, Dresden, Germany},
   publisher = {Gesellschaft f{\"u}r Informatik},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {351--370},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2021},
   doi = {10.18420/btw2021-19},
   language = {Englisch},
   cr-category = {H.4 Information Systems Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {During recent years, data lakes emerged as a way to manage large amounts of heterogeneous data for modern data analytics. Although various work on individual aspects of data lakes exists, there is no comprehensive data lake architecture yet. Concepts that describe themselves as a ``data lake architecture'' are only partial. In this work, we introduce the data lake architecture framework. It supports the definition of data lake architectures by defining nine architectural aspects, i.e., perspectives on a data lake, such as data storage or data modeling, and by exploring the interdependencies between these aspects. The included methodology helps to choose appropriate concepts to instantiate each aspect. To evaluate the framework, we use it to configure an exemplary data lake architecture for a real-world data lake implementation. This final assessment shows that our framework provides comprehensive guidance in the configuration of a data lake architecture.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2021-05&engl=0}
}
@inproceedings {INPROC-2020-55,
   author = {Corinna Giebler and Christoph Gr{\"o}ger and Eva Hoos and Holger Schwarz and Bernhard Mitschang},
   title = {{A Zone Reference Model for Enterprise-Grade Data Lake Management}},
   booktitle = {Proceedings of the 24th IEEE Enterprise Computing Conference},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {57--66},
   type = {Konferenz-Beitrag},
   month = {Oktober},
   year = {2020},
   keywords = {Data Lake; Zones; Reference Model; Industry Case; Industry Experience},
   language = {Englisch},
   cr-category = {H.4 Information Systems Applications},
   contact = {Senden Sie eine E-Mail an corinna.giebler@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Data lakes are on the rise as data platforms for any kind of analytics, from data exploration to machine learning. They achieve the required flexibility by storing heterogeneous data in their raw format, and by avoiding the need for pre-defined use cases. However, storing only raw data is inefficient, as for many applications, the same data processing has to be applied repeatedly. To foster the reuse of processing steps, literature proposes to store data in different degrees of processing in addition to their raw format. To this end, data lakes are typically structured in zones. There exist various zone models, but they are varied, vague, and no assessments are given. It is unclear which of these zone models is applicable in a practical data lake implementation in enterprises. In this work, we assess existing zone models using requirements derived from multiple representative data analytics use cases of a real-world industry case. We identify the shortcomings of existing work and develop a zone reference model for enterprise-grade data lake management in a detailed manner. We assess the reference model's applicability through a prototypical implementation for a real-world enterprise data lake use case. This assessment shows that the zone reference model meets the requirements relevant in practice and is ready for industry use.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-55&engl=0}
}
@inproceedings {INPROC-2020-50,
   author = {Rebecca Eichler and Corinna Giebler and Christoph Gr{\"o}ger and Holger Schwarz and Bernhard Mitschang},
   title = {{HANDLE - A Generic Metadata Model for Data Lakes}},
   booktitle = {Big Data Analytics and Knowledge Discovery: 22nd International Conference, DaWaK 2020, Bratislava, Slovakia, September 14–17, 2020, Proceedings},
   publisher = {Springer, Cham},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {73--88},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2020},
   language = {Englisch},
   cr-category = {H.2 Database Management},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The substantial increase in generated data induced the development of new concepts such as the data lake. A data lake is a large storage repository designed to enable flexible extraction of the data's value. A key aspect of exploiting data value in data lakes is the collection and management of metadata. To store and handle the metadata, a generic metadata model is required that can reflect metadata of any potential metadata management use case, e.g., data versioning or data lineage. However, an evaluation of existent metadata models yields that none so far are sufficiently generic. In this work, we present HANDLE, a generic metadata model for data lakes, which supports the flexible integration of metadata, data lake zones, metadata on various granular levels, and any metadata categorization. With these capabilities HANDLE enables comprehensive metadata management in data lakes. We show HANDLE's feasibility through the application to an exemplary access-use-case and a prototypical implementation. A comparison with existent models yields that HANDLE can reflect the same information and provides additional capabilities needed for metadata management in data lakes.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-50&engl=0}
}
@inproceedings {INPROC-2020-48,
   author = {Dennis Przytarski and Christoph Stach and Cl{\'e}mentine Gritti and Bernhard Mitschang},
   title = {{A Blueprint for a Trustworthy Health Data Platform Encompassing IoT and Blockchain Technologies}},
   booktitle = {Proceedings of the ISCA 29th International Conference on Software Engineering and Data Engineering (Las Vegas, October 2020)},
   publisher = {ISCA in Cooperation with IEEE Computer Society},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   pages = {1--10},
   type = {Konferenz-Beitrag},
   month = {Oktober},
   year = {2020},
   language = {Englisch},
   cr-category = {H.2.7 Database Administration,     K.6.5 Security and Protection},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {eHealth provides great relief for patients and physicians. This means, patients autonomously monitor their condition via IoT medical devices and make these data available to physicians for analyses. This requires a data platform that takes care of data acquisition, management, and provisioning. As health data are highly sensitive, there are major concerns regarding data security with respect to confidentiality, integrity, and authenticity. To this end, we present a blueprint for constructing a trustworthy health data platform called SEAL. It provides a lightweight attribute-based authentication mechanism for IoT devices to validate all involved data sources, there is a fine-grained data provisioning system to enable data provision according to actual requirements, and a verification procedure ensures that data cannot be manipulated.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-48&engl=0}
}
@inproceedings {INPROC-2020-45,
   author = {Rebecca Eichler and Corinna Giebler and Christoph Gr{\"o}ger and Holger Schwarz and Bernhard Mitschang},
   title = {{HANDLE - A Generic Metadata Model for Data Lakes}},
   booktitle = {Big Data Analytics and Knowledge Discovery},
   editor = {Min Song and Il-Yeol Song and Gabriele Kotsis and A Min Tjoa and Ismail Khalil},
   publisher = {Springer Nature Switzerland AG},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Lecture Notes in Computer Science},
   volume = {12393},
   pages = {73--88},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2020},
   doi = {https://doi.org/10.1007/978-3-030-59065-9_7},
   keywords = {Metadata management; Metadata model; Data lake},
   language = {Englisch},
   cr-category = {H.2 Database Management},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2020-45/INPROC-2020-45.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The substantial increase in generated data induced the development of new concepts such as the data lake. A data lake is a large storage repository designed to enable flexible extraction of the data's value. A key aspect of exploiting data value in data lakes is the collection and management of metadata. To store and handle the metadata, a generic metadata model is required that can reflect metadata of any potential metadata management use case, e.g., data versioning or data lineage. However, an evaluation of existent metadata models yields that none so far are sufficiently generic. In this work, we present HANDLE, a generic metadata model for data lakes, which supports the flexible integration of metadata, data lake zones, metadata on various granular levels, and any metadata categorization. With these capabilities HANDLE enables comprehensive metadata management in data lakes. We show HANDLE's feasibility through the application to an exemplary access-use-case and a prototypical implementation. A comparison with existent models yields that HANDLE can reflect the same information and provides additional capabilities needed for metadata management in data lakes.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-45&engl=0}
}
@inproceedings {INPROC-2020-37,
   author = {Mathias Mormul and Pascal Hirmer and Christoph Stach and Bernhard Mitschang},
   title = {{DEAR: Distributed Evaluation of Alerting Rules}},
   booktitle = {IEEE 13th International Conference on Cloud Computing (CLOUD)},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--1},
   type = {Konferenz-Beitrag},
   month = {Dezember},
   year = {2020},
   keywords = {cloud monitoring; agent-based; alerting},
   language = {Englisch},
   cr-category = {H.0 Information Systems General},
   contact = {mathias.mormul@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Cloud computing passed the hype cycle long ago and firmly established itself as a future technology since then. However, to utilize the cloud as cost-efficiently as possible, a continuous monitoring is key to prevent an over- or under-commissioning of resources. In large-scaled scenarios, several challenges for cloud monitoring, such as high network traffic volume, low accuracy of monitoring data, and high time-to-insight, require new approaches in IT Operations while considering administrative complexity. To handle these challenges, we present DEAR, the Distributed Evaluation of Alerting Rules. DEAR is a plugin for monitoring systems which automatically distributes alerting rules to the monitored resources to solve the trade-off between high accuracy and low network traffic volume without administrative overhead. We evaluate our approach against requirements of today's IT monitoring and compare it to conventional agent-based monitoring approaches.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-37&engl=0}
}
@inproceedings {INPROC-2020-32,
   author = {Vitali Hirsch and Peter Reimann and Bernhard Mitschang},
   title = {{Exploiting Domain Knowledge to Address Multi-Class Imbalance and a Heterogeneous Feature Space in Classification Tasks for Manufacturing Data}},
   booktitle = {Proceedings of the 46th International Conference on Very Large Databases (VLDB)},
   editor = {Magdalena Balazinska and Xiaofang Zhou},
   publisher = {ACM Digital Library},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Proceedings of the VLDB Endowment},
   volume = {13(12)},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {2020},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Classification techniques are increasingly adopted for quality control in manufacturing, e. g., to help domain experts identify the cause of quality issues of defective products. However, real-world data often imply a set of analytical challenges, which lead to a reduced classification performance. Major challenges are a high degree of multi-class imbalance within data and a heterogeneous feature space that arises from the variety of underlying products. This paper considers such a challenging use case in the area of End-of-Line testing, i. e., the final functional test of complex products. Existing solutions to classification or data pre-processing only address individual analytical challenges in isolation. We propose a novel classification system that explicitly addresses both challenges of multi-class imbalance and a heterogeneous feature space together. As main contribution, this system exploits domain knowledge to systematically prepare the training data. Based on an experimental evaluation on real-world data, we show that our classification system outperforms any other classification technique in terms of accuracy. Furthermore, we can reduce the amount of rework required to solve a quality issue of a product.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-32&engl=0}
}
@inproceedings {INPROC-2020-31,
   author = {Yannick Wilhelm and Peter Reimann and Wolfgang Gauchel and Bernhard Mitschang},
   title = {{Overview on Hybrid Approaches to Fault Detection and Diagnosis: Combining Data-driven, Physics-based and Knowledge-based Models}},
   booktitle = {Procedia CIRP: Proceedings of the 14th CIRP Conference on Intelligent Computation in Manufacturing Engineering (CIRP ICME)},
   publisher = {Elsevier BV},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2020},
   keywords = {Fault Detection; Fault Diagnosis; Hybrid Methods; Diagnostics and Maintenance; Knowledge-driven Methods; Machine Learning},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications,     I.2.1 Applications and Expert Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {In this paper, we review hybrid approaches for fault detection and fault diagnosis (FDD) that combine data-driven analysis with physics-based and knowledge-based models to overcome a lack of data and to increase the FDD accuracy. We categorize these hybrid approaches according to the steps of an extended common workflow for FDD. This gives practitioners indications of which kind of hybrid FDD approach they can use in their application.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-31&engl=0}
}
@inproceedings {INPROC-2020-20,
   author = {Yannick Wilhelm and Ulf Schreier and Peter Reimann and Bernhard Mitschang and Holger Ziekow},
   title = {{Data Science Approaches to Quality Control in Manufacturing: A Review of Problems, Challenges and Architecture}},
   booktitle = {Springer Proceedings Series Communications in Computer and Information Science (CCIS)},
   publisher = {Springer},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   type = {Konferenz-Beitrag},
   month = {Oktober},
   year = {2020},
   keywords = {Data Science; Machine Learning; Quality Control; Challenges; Functional Architecture},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Manufacturing environments are characterized by non-stationary processes, constantly varying conditions, complex process interdependencies, and a high number of product variants. These and other aspects pose several challenges for common machine learning algorithms to achieve reliable and accurate predictions. This overview and vision paper provides a comprehensive list of common problems and challenges for data science approaches to quality control in manufacturing. We have derived these problems and challenges by inspecting three real-world use cases in the field of product quality control and via a comprehensive literature study. We furthermore associate the identified problems and challenges to individual layers and components of a functional setup, as it can be found in manufacturing environments today. Additionally, we extend and revise this functional setup and this way propose our vision of a future functional software architecture. This functional architecture represents a visionary blueprint for solutions that are able to address all challenges for data science approaches in manufacturing quality control.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-20&engl=0}
}
@inproceedings {INPROC-2020-18,
   author = {Julian Ziegler and Peter Reimann and Florian Keller and Bernhard Mitschang},
   title = {{A Graph-based Approach to Manage CAE Data in a Data Lake}},
   booktitle = {Procedia CIRP: Proceedings of the 53rd CIRP Conference on Manufacturing Systems (CIRP CMS 2020)},
   publisher = {Elsevier},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2020},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Computer-aided engineering (CAE) applications generate vast quantities of heterogeneous data. Domain experts often fail to explore and analyze these data, because they are not integrated across different applications. Existing data management solutions are rather tailored to scientific applications. In our approach, we tackle this issue by combining a data lake solution with graph-based metadata management. This provides a holistic view of all CAE data and of the data-generating applications in one interconnected structure. Based on a prototypical implementation, we discuss how this eases the task of domain experts to explore and extract data for further analyses.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-18&engl=0}
}
@inproceedings {INPROC-2020-17,
   author = {Vitali Hirsch and Peter Reimann and Bernhard Mitschang},
   title = {{Incorporating Economic Aspects into Recommendation Ranking to Reduce Failure Costs}},
   booktitle = {Procedia CIRP: Proceedings of the 53rd CIRP Conference on Manufacturing Systems (CIRP CMS 2020)},
   publisher = {Elsevier},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2020},
   keywords = {decision support; predictive analytics; quality control; End-of-Line testing; classification; fault isolation; failure costs},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Machine learning approaches for manufacturing usually offer recommendation lists, e.g., to support humans in fault diagnosis. For instance, if a product does not pass the final check after the assembly, a recommendation list may contain likely faulty product components to be replaced. Thereby, the list ranks these components using their probabilities. However, these probabilities often differ marginally, while economic impacts, e.g., the costs for replacing components, differ significantly. We address this issue by proposing an approach that incorporates costs to re-rank a list. Our evaluation shows that this approach reduces fault-related costs when using recommendation lists to support human labor.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-17&engl=0}
}
@inproceedings {INPROC-2020-11,
   author = {Mathias Mormul and Pascal Hirmer and Christoph Stach and Bernhard Mitschang},
   title = {{Avoiding Vendor-Lockin in Cloud Monitoring using Generic Agent Templates}},
   booktitle = {Proceedings of the 23rd International Conference on Business Information Systems (BIS), 2020},
   publisher = {Springer},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--1},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2020},
   keywords = {Vendor Lock-in; Cloud monitoring; Monitoring agents; Genericity},
   language = {Englisch},
   cr-category = {H.4.0 Information Systems Applications General},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Cloud computing passed the hype cycle long ago and firmly established itself as a future technology since then. However, to utilize the cloud optimally, and therefore, as cost-efficiently as possible, a continuous monitoring is key to prevent an over- or under-commissioning of resources. However, selecting a suitable monitoring solution is a challenging task. Monitoring agents that collect monitoring data are spread across the monitored IT environment. Therefore, the possibility of vendor lock-ins leads to a lack of flexibility when the cloud environment or the business needs change. To handle these challenges, we introduce \textit{generic agent templates} that are applicable to many monitoring systems and support a replacement of monitoring systems. Solution-specific technical details of monitoring agents are abstracted from and system administrators only need to model generic agents, which can be transformed into solution-specific monitoring agents. The transformation logic required for this process is provided by domain experts to not further burden system administrators. Furthermore, we introduce an agent lifecycle to support the system administrator with the management and deployment of generic agents.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-11&engl=0}
}
@inproceedings {INPROC-2020-07,
   author = {Christoph Stach and Cl{\'e}mentine Gritti and Dennis Przytarski and Bernhard Mitschang},
   title = {{Trustworthy, Secure, and Privacy-aware Food Monitoring Enabled by Blockchains and the IoT}},
   booktitle = {Proceedings of the 18th Annual IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops), 23-27 March, 2020, Austin, Texas, USA},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--4},
   type = {Konferenz-Beitrag},
   month = {M{\"a}rz},
   year = {2020},
   keywords = {Attribute-based Credentials; Blockchain; Data Authentication; IoT; Privacy; Service Utility; Transparency; Trust},
   language = {Englisch},
   cr-category = {K.6.5 Security and Protection,     D.4.6 Operating Systems Security and Protection},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {A large number of food scandals (e.g., falsely declared meat or non-compliance with hygiene regulations) are causing considerable concern to consumers. Although Internet of Things (IoT) technologies are used in the food industry to monitor production (e.g., for tracing the origin of meat or monitoring cold chains), the gathered data are not used to provide full transparency to the consumer. To achieve this, however, three aspects must be considered: a) The origin of the data must be verifiable, i.e., it must be ensured that the data originate from calibrated sensors. b) The data must be stored tamper-resistant, immutable, and open to all consumers. c) Despite this openness, the privacy of affected data subjects (e.g., the carriers) must still be protected. To this end, we introduce the SHEEPDOG architecture that ``shepherds'' products from production to purchase to enable a trustworthy, secure, and privacy-aware food monitoring. In SHEEPDOG, attribute-based credentials ensure trustworthy data acquisition, blockchain technologies provide secure data storage, and fine-grained access control enables privacy-aware data provision.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-07&engl=0}
}
@inproceedings {INPROC-2020-06,
   author = {Cornelia Kiefer and Peter Reimann and Bernhard Mitschang},
   title = {{Prevent Low-Quality Analytics by Automatic Selection of the Best-Fitting Training Data}},
   booktitle = {Proceedings of the 53rd Hawaii International Conference on System Sciences (HICSS)},
   address = {Maui, Hawaii, USA},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1036--1045},
   type = {Konferenz-Beitrag},
   month = {Januar},
   year = {2020},
   isbn = {978-0-9981331-3-3},
   keywords = {data quality; domain-specific data analysis; text analysis; text similarity; training data},
   language = {Englisch},
   cr-category = {I.2.7 Natural Language Processing},
   ee = {https://scholarspace.manoa.hawaii.edu/bitstream/10125/63868/0103.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Data analysis pipelines consist of a sequence of various analysis tools. Most of these tools are based on supervised machine learning techniques and thus rely on labeled training data. Selecting appropriate training data has a crucial impact on analytics quality. Yet, most of the times, domain experts who construct analysis pipelines neglect the task of selecting appropriate training data. They rely on default training data sets, e.g., since they do not know which other training data sets exist and what they are used for. Yet, default training data sets may be very different from the domain-specific input data that is to be analyzed, leading to low-quality results. Moreover, these input data sets are usually unlabeled. Thus, information on analytics quality is not measurable with evaluation metrics. Our contribution comprises a method that (1) indicates the expected quality to the domain expert while constructing the analysis pipeline, without need for labels and (2) automatically selects the best-fitting training data. It is based on a measurement of the similarity between input and training data. In our evaluation, we consider the part-of-speech tagger tool and show that Latent Semantic Analysis (LSA) and Cosine Similarity are suited as indicators for the quality of analysis results and as basis for an automatic selection of the best-fitting training data.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-06&engl=0}
}
@inproceedings {INPROC-2020-04,
   author = {Christoph Stach and Cl{\'e}mentine Gritti and Bernhard Mitschang},
   title = {{Bringing Privacy Control back to Citizens: DISPEL - A Distributed Privacy Management Platform for the Internet of Things}},
   booktitle = {Proceedings of the 35th ACM/SIGAPP Symposium On Applied Computing (PDP).},
   address = {Brno, Czech Republic},
   publisher = {ACM Press},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--8},
   type = {Konferenz-Beitrag},
   month = {M{\"a}rz},
   year = {2020},
   keywords = {privacy; IoT; authorization concept; attribute-based access control},
   language = {Englisch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues,     D.4.6 Operating Systems Security and Protection},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The Internet of Things (IoT) is becoming increasingly popular. It enables a variety of novel applications. Such applications require a lot of data about their users. To this end, sensors continuously monitor various aspects of daily life. Despite the indisputable benefits of IoT applications, this is a severe privacy threat. Due to the GDPR coming into force, there is a need for action on the part of IoT vendors. In this paper, we therefore introduce a Privacy by Design approach for IoT applications called DISPEL. It provides a configuration method enabling users to specify globally, which application may access what data for which purpose. Privacy protection is then applied at the earliest stage possible, i.e., directly on the IoT devices generating the data. Data transmission is protected against unauthorized access and manipulation. Evaluation results show that DISPEL fulfills the requirements towards an IoT privacy system.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-04&engl=0}
}
@inproceedings {INPROC-2020-03,
   author = {Christoph Stach and Corinna Giebler and Manuela Wagner and Christian Weber and Bernhard Mitschang},
   title = {{AMNESIA: A Technical Solution towards GDPR-compliant Machine Learning}},
   booktitle = {Proceedings of the 6th International Conference on Information Systems Security and Privacy (ICISSP 2020)},
   editor = {Steven Furnell and Paolo Mori and Edgar Weippl and Olivier Camp},
   address = {Valletta, Malta},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--12},
   type = {Konferenz-Beitrag},
   month = {Februar},
   year = {2020},
   keywords = {Machine Learning; Data Protection; Privacy Zones; Access Control; Model Management; Provenance; GDPR},
   language = {Englisch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues,     I.5.1 Pattern Recognition Models},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Machine Learning (ML) applications are becoming increasingly valuable due to the rise of IoT technologies. That is, sensors continuously gather data from different domains and make them available to ML for learning its models. This provides profound insights into the data and enables predictions about future trends. While ML has many advantages, it also represents an immense privacy risk. Data protection regulations such as the GDPR address such privacy concerns, but practical solutions for the technical enforcement of these laws are also required. Therefore, we introduce AMNESIA, a privacy-aware machine learning model provisioning platform. AMNESIA is a holistic approach covering all stages from data acquisition to model provisioning. This makes it possible to control which application may use which data for ML as well as to make models ``forget'' certain knowledge.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-03&engl=0}
}
@inproceedings {INPROC-2019-32,
   author = {Vitali Hirsch and Peter Reimann and Bernhard Mitschang},
   title = {{Data-Driven Fault Diagnosis in End-of-Line Testing of Complex Products}},
   booktitle = {Proceedings of the 6th IEEE International Conference on Data Science and Advanced Analytics (DSAA 2019), Washington, D.C., USA},
   publisher = {IEEE Xplore},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   type = {Konferenz-Beitrag},
   month = {Oktober},
   year = {2019},
   keywords = {decision support; classification; ensembles; automotive; fault diagnosis; quality management; sampling},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Machine learning approaches may support various use cases in the manufacturing industry. However, these approaches often do not address the inherent characteristics of the real manufacturing data at hand. In fact, real data impose analytical challenges that have a strong influence on the performance and suitability of machine learning methods. This paper considers such a challenging use case in the area of End-of-Line testing, i.e., the final functional check of complex products after the whole assembly line. Here, classification approaches may be used to support quality engineers in identifying faulty components of defective products. For this, we discuss relevant data sources and their characteristics, and we derive the resulting analytical challenges. We have identified a set of sophisticated data-driven methods that may be suitable to our use case at first glance, e.g., methods based on ensemble learning or sampling. The major contribution of this paper is a thorough comparative study of these methods to identify whether they are able to cope with the analytical challenges. This comprises the discussion of both fundamental theoretical aspects and major results of detailed experiments we have performed on the real data of our use case.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-32&engl=0}
}
@inproceedings {INPROC-2019-09,
   author = {Christoph Stach and Bernhard Mitschang},
   title = {{ECHOES: A Fail-safe, Conflict Handling, and Scalable Data Management Mechanism for the Internet of Things}},
   booktitle = {Proceedings of the 23rd European Conference on Advances in Databases and Information Systems: ADBIS '19; Bled, Slovenia, September 8-11, 2019},
   editor = {Tatjana Welzer and Johann Eder and Vili Podgorelec and Aida Kamisalic Latific},
   publisher = {Springer},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Lecture Notes in Computer Science (LNCS)},
   pages = {1--16},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2019},
   keywords = {Internet of Things; Data Exchange; Synchronization Protocol},
   language = {Englisch},
   cr-category = {H.2.7 Database Administration,     H.2.4 Database Management Systems},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The Internet of Things (IoT) and Smart Services are becoming increasingly popular. Such services adapt to a user's needs by using sensors to detect the current situation. Yet, an IoT service has to capture its required data by itself, even if another service has already captured it before. There is no data exchange mechanism adapted to the IoT which enables sharing of sensor data among services and across devices. Therefore, we introduce a data management mechanism for the IoT. Due to its applied state-based synchronization protocol called ECHOES, it is fail-safe in case of connection failures, it detects and handles data conflicts, it is geared towards devices with limited resources, and it is highly scalable. We embed ECHOES into a data provisioning infrastructure, namely the Privacy Management Platform and the Secure Data Container. Evaluation results verify the practicability of our approach.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-09&engl=0}
}
@inproceedings {INPROC-2019-08,
   author = {Cornelia Kiefer and Peter Reimann and Bernhard Mitschang},
   title = {{A Hybrid Information Extraction Approach Exploiting Structured Data Within a Text Mining Process}},
   booktitle = {18. Fachtagung des GI-Fachbereichs ,,Datenbanken und Informationssysteme'' (DBIS), 4.-8. M{\"a}rz 2019, Rostock, Germany, Proceedings.},
   editor = {Torsten Grust and Felix Naumann and Alexander B{\"o}hm and Wolfgang Lehner and Theo H{\"a}rder and Erhard et al. Rahm},
   address = {Bonn},
   publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {149--168},
   type = {Konferenz-Beitrag},
   month = {M{\"a}rz},
   year = {2019},
   keywords = {information extraction; clustering; text mining; free text fields},
   language = {Englisch},
   cr-category = {I.2.7 Natural Language Processing},
   ee = {https://doi.org/10.18420/btw2019-10},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Many data sets encompass structured data fields with embedded free text fields. The text fields allow customers and workers to input information which cannot be encoded in structured fields. Several approaches use structured and unstructured data in isolated analyses. The result of isolated mining of structured data fields misses crucial information encoded in free text. The result of isolated text mining often mainly repeats information already available from structured data. The actual information gain of isolated text mining is thus limited. The main drawback of both isolated approaches is that they may miss crucial information. The hybrid information extraction approach suggested in this paper addresses this issue. Instead of extracting information that in large parts was already available beforehand, it extracts new, valuable information from free texts. Our solution exploits results of analyzing structured data within the text mining process, i.e., structured information guides and improves the information extraction process on textual data. Our main contributions comprise the description of the concept of hybrid information extraction as well as a prototypical implementation and an evaluation with two real-world data sets from aftersales and production with English and German free text fields.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-08&engl=0}
}
@inproceedings {INPROC-2019-06,
   author = {Christoph Stach and Frank Steimle and Clementine Gritti and Bernhard Mitschang},
   title = {{PSSST! The Privacy System for Smart Service Platforms: An Enabler for Confidable Smart Environments}},
   booktitle = {Proceedings of the 4th International Conference on Internet of Things, Big Data and Security (IoTBDS '19)},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--12},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2019},
   keywords = {Privacy; Access Control; Internet of Things; Smart Service Platform; Sensors; Actuators; Stream Processing},
   language = {Englisch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues,     D.4.6 Operating Systems Security and Protection},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The Internet of Things and its applications are becoming increasingly popular. Especially Smart Service Platforms like Alexa are in high demand. Such a platform retrieves data from sensors, processes them in a back-end, and controls actuators in accordance with the results. Thereby, all aspects of our everyday life can be managed. In this paper, we reveal the downsides of this technology by identifying its privacy threats based on a real-world application. Our studies show that current privacy systems do not tackle these issues adequately. Therefore, we introduce PSSST!, a user-friendly and comprehensive privacy system for Smart Service Platforms limiting the amount of disclosed private information while maximizing the quality of service at the same time.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-06&engl=0}
}
@inproceedings {INPROC-2018-54,
   author = {Alejandro Villanueva Zacarias and Peter Reimann and Bernhard Mitschang},
   title = {{A Framework to Guide the Selection and Configuration of Machine-Learning-based Data Analytics Solutions in Manufacturing}},
   booktitle = {Proceedings of the 51st CIRP Conference on Manufacturing Systems (CIRP CMS 2018)},
   publisher = {Elsevier BV},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {153--158},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2018},
   keywords = {data analytics; machine learning; learning algorithms; generative design},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Users in manufacturing willing to apply machine-learning-based (ML-based) data analytics face challenges related to data quality or to the selection and configuration of proper ML algorithms. Current approaches are either purely empirical or reliant on technical data. This makes understanding and comparing candidate solutions difficult, and also ignores the way it impacts the real application problem. In this paper, we propose a framework to generate analytics solutions based on a systematic profiling of all aspects involved. With it, users can visually and systematically explore relevant alternatives for their specific scenario, and obtain recommendations in terms of costs, productivity, results quality, or execution time.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-54&engl=0}
}
@inproceedings {INPROC-2018-53,
   author = {Vitali Hirsch and Peter Reimann and Oliver Kirn and Bernhard Mitschang},
   title = {{Analytical Approach to Support Fault Diagnosis and Quality Control in End-Of-Line Testing}},
   booktitle = {Proceedings of the 51st CIRP Conference on Manufacturing Systems (CIRP CMS 2018)},
   publisher = {Elsevier BV},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1333--1338},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2018},
   keywords = {Analytics; decision support; recommendation system; fault diagnosis; quality control},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Operators in end-of-line testing of assembly lines often try out multiple solutions until they can solve a product quality issue. This calls for a decision support system based on data analytics that effectively helps operators in fault diagnosis and quality control. However, existing analytical approaches do not consider the specific data characteristics being prevalent in the area of End-of-Line (EoL) testing. We address this issue by proposing an analytical approach that is tailored to EoL testing. We show how to implement this approach in a real-world use case of a large automotive manufacturer, which reveals its potential to reduce unnecessary rework.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-53&engl=0}
}
@inproceedings {INPROC-2018-38,
   author = {Andreas Liebing and Lutz Ashauer and Uwe Breitenb{\"u}cher and Thomas G{\"u}nther and Michael Hahn and K{\'a}lm{\'a}n K{\'e}pes and Oliver Kopp and Frank Leymann and Bernhard Mitschang and Ana C. Franco da Silva and Ronald Steinke},
   title = {{The SmartOrchestra Platform: A Configurable Smart Service Platform for IoT Systems}},
   booktitle = {Papers from the 12th Advanced Summer School on Service-Oriented Computing (SummerSoC 2018)},
   publisher = {IBM Research Division},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {14--21},
   type = {Konferenz-Beitrag},
   month = {Oktober},
   year = {2018},
   keywords = {SmartOrchestra Platform; Smart Services; Cyber-Physical Systems; Internet of Things},
   language = {Englisch},
   cr-category = {K.6 Management of Computing and Information Systems,     D.2.7 Software Engineering Distribution, Maintenance, and Enhancement,     D.2.12 Software Engineering Interoperability},
   ee = {https://www.2018.summersoc.eu/},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {The Internet of Things is growing rapidly while still missing a universal operating and management platform for multiple diverse use cases. Such a platform should provide all necessary functionalities and the underlying infrastructure for the setup, execution and composition of Smart Services. The concept of Smart Services enables the connection and integration of cyber-physical systems (CPS) and technologies (i.e., sensors and actuators) with business-related applications and services. Therefore, the SmartOrchestra Platform provides an open and standards-based service platform for the utilization of public administrative and business-related Smart Services. It combines the features of an operating platform, a marketplace, a broker, and a notary for a cloud-based operation of Smart Services. Thus, users of cyber-physical systems are free to choose their control applications, no matter what device they are using (e.g., smartphone, tablet or personal computer) and they also become independent of the manufacturers' software. This will enable new business opportunities for different stakeholders in the market and allows flexibly composing Smart Services.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-38&engl=0}
}
@inproceedings {INPROC-2018-34,
   author = {Ana Cristina Franco da Silva and Pascal Hirmer and Rafael Koch Peres and Bernhard Mitschang},
   title = {{An Approach for CEP Query Shipping to Support Distributed IoT Environments}},
   booktitle = {Proceedings of the IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {247--252},
   type = {Konferenz-Beitrag},
   month = {Oktober},
   year = {2018},
   isbn = {978-1-5386-3227-7},
   doi = {10.1109/PERCOMW.2018.8480241},
   language = {Englisch},
   cr-category = {H.0 Information Systems General},
   ee = {https://ieeexplore.ieee.org/document/8480241},
   contact = {francoaa@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-34&engl=0}
}
@inproceedings {INPROC-2018-28,
   author = {Mathias Mormul and Pascal Hirmer and Matthias Wieland and Bernhard Mitschang},
   title = {{Layered Modeling Approach for Distributed Situation Recognition in Smart Environments}},
   booktitle = {Tagungsband: SMART 2018, The Seventh International Conference on Smart Cities, Systems, Devices and Technologies},
   publisher = {Xpert Publishing Services},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {47--53},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2018},
   isbn = {978-1-61208-653-8},
   keywords = {Industry 4.0; Edge Computing; Smart Factories; Smart Homes; Situation Recognition; Distribution Pattern},
   language = {Englisch},
   cr-category = {H.0 Information Systems General},
   contact = {mathias.mormul@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-28&engl=0}
}
@inproceedings {INPROC-2018-24,
   author = {Dominik Lucke and Peter Einberger and Daniel Schel and Michael Luckert and Matthias Schneider and Emir Cuk and Thomas Bauernhansl and Matthias Wieland and Frank Steimle and Bernhard Mitschang},
   title = {{Implementation of the MIALinx Integration Concept for Future Manufacturing Environments to Enable Retrofitting of Machines}},
   booktitle = {Proceedings of the 12th CIRP Conference on Intelligent Computation in Manufacturing Engineering (CIRP ICME '18); Naples, Italy, July 18-20, 2018},
   publisher = {Elsevier},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {596--601},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2018},
   doi = {10.1016/j.procir.2019.02.084},
   keywords = {Manufacturing; Smart Factory; Industrie 4.0; Manufacturing Service Bus; Rules; Integration; MIALinx},
   language = {Englisch},
   cr-category = {H.4.0 Information Systems Applications General,     I.2.1 Applications and Expert Systems},
   ee = {http://www.sciencedirect.com/science/article/pii/S221282711930201X},
   contact = {Frank.Steimle@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Manufacturing has to adapt permanently to changing situations in order to stay competitive. It demands a flexible and easy-to-use integration of production equipment and ICT systems on the shop floor. The contribution of this paper is the presentation of the implementation architecture of the Manufacturing Integration Assistant (MIALinx) that simplifies this challenge. The integration steps range from integrating sensors over collecting and rule-based processing of sensor information to the execution of required actions. Furthermore, we describe the implementation of MIALinx by commissioning it in a manufacturing environment to retrofit legacy machines for Industrie 4.0. Finally, we validate the suitability of our approach by applying our solution in the production environment of a medium-size company.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-24&engl=0}
}
@inproceedings {INPROC-2018-22,
   author = {Rachaa Ghabri and Pascal Hirmer and Bernhard Mitschang},
   title = {{A Hybrid Approach to Implement Data Driven Optimization into Production Environments}},
   booktitle = {Proceedings of the 21st International Conference on Business Information Systems (BIS)},
   editor = {Witold Abramowicz and Adrian Paschke},
   publisher = {Springer Berlin Heidelberg},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Lecture Notes in Business Information Processing},
   volume = {320},
   pages = {3--14},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2018},
   isbn = {978-3-319-93930-8},
   doi = {https://doi.org/10.1007/978-3-319-93931-5},
   issn = {1865-1356},
   keywords = {Data driven optimization; Production environment; Top-down; Bottom-up},
   language = {Englisch},
   cr-category = {H.0 Information Systems General},
   ee = {https://link.springer.com/chapter/10.1007/978-3-319-93931-5_1},
   contact = {rachaa.ghabri@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-22&engl=0}
}
@inproceedings {INPROC-2018-19,
   author = {Christoph Stach and Frank Steimle and Bernhard Mitschang},
   title = {{THOR - Ein Datenschutzkonzept f{\"u}r die Industrie 4.0: Datenschutzsysteme f{\"u}r die Smart Factory zur Realisierung der DSGVO}},
   booktitle = {Informatik 2018: Zukunft der Arbeit - Zukunft der Informatik, Tagungsband der 48. Jahrestagung der Gesellschaft f{\"u}r Informatik e.V. (GI), 26.09. - 27.09.2018, Berlin.},
   publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Lecture Notes in Informatics (LNI)},
   pages = {1--12},
   type = {Workshop-Beitrag},
   month = {September},
   year = {2018},
   keywords = {Datenschutz; Internet der Dinge; Sensoren; Industrie 4.0; Datenstr{\"o}me; Smart Devices},
   language = {Deutsch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues,     D.4.6 Operating Systems Security and Protection},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Der Aufschwung des Internets der Dinge (IoT) sorgt f{\"u}r eine voranschreitende Digitalisierung. Sensoren in Alltagsgegenst{\"a}nden erfassen unterschiedliche Aspekte des t{\"a}glichen Lebens. Durch eine Vernetzung dieser Ger{\"a}te, lassen sich die Daten miteinander kombinieren und daraus neues Wissen generieren. In der Industrie 4.0 werden beispielsweise die am Produktionsprozess beteiligten cyber-physischen Systeme dazu genutzt, um mit den von ihnen erfassten Daten Produktionsprozesse zu optimieren. Da auch der Mensch ein relevanter Bestandteil des Produktionsprozesses ist, werden z.B. mittels Smart Watches auch {\"u}ber diesen viele Daten erfasst. Nicht erst mit der Einf{\"u}hrung der neuen Datenschutzgrundverordnung (DSGVO) sind hierbei allerdings Datenschutzanforderungen zu beachten: Es m{\"u}ssen nicht nur die privaten Daten der Nutzer gesch{\"u}tzt werden, sondern es muss auch sichergestellt werden, dass die Datenverarbeitung und -analyse dadurch so wenig wie m{\"o}glich behindert werden. Wir stellen hierf{\"u}r ein neuartiges Datenschutzkonzept f{\"u}r die Industrie 4.0 (THOR) vor, mit dem Kompromisse zwischen erforderlichem Datenschutz und gew{\"u}nschter Datenqualit{\"a}t gefunden werden k{\"o}nnen, der der DSGVO gen{\"u}gt.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-19&engl=0}
}
@inproceedings {INPROC-2018-18,
   author = {Ana Cristina Franco da Silva and Pascal Hirmer and Uwe Breitenb{\"u}cher and Oliver Kopp and Bernhard Mitschang},
   title = {{TDLIoT: A Topic Description Language for the Internet of Things}},
   booktitle = {ICWE 2018: Web Engineering},
   editor = {Tommi Mikkonen and Ralf Klamma and Juan Hern{\'a}ndez},
   publisher = {Springer Berlin Heidelberg},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Lecture Notes in Computer Science (LNCS)},
   volume = {10845},
   pages = {333--348},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2018},
   doi = {10.1007/978-3-319-91662-0_27},
   keywords = {Internet of Things; Publish-subscribe; Description Language},
   language = {Englisch},
   cr-category = {K.6 Management of Computing and Information Systems,     D.2.12 Software Engineering Interoperability},
   ee = {https://link.springer.com/chapter/10.1007/978-3-319-91662-0_27},
   contact = {franco-da-silva@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-18&engl=0}
}
@inproceedings {INPROC-2018-17,
   author = {Marc H{\"u}ffmeyer and Pascal Hirmer and Bernhard Mitschang and Ulf Schreier and Matthias Wieland},
   title = {{Situation-Aware Access Control for Industrie 4.0}},
   booktitle = {ICISSP 2017: Information Systems Security and Privacy},
   publisher = {Springer},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Communications in Computer and Information Science},
   volume = {867},
   pages = {59--83},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2018},
   keywords = {Authorization; Attribute based access control; Situation-awareness; REST; Internet of Things},
   language = {Englisch},
   cr-category = {E.0 Data General},
   ee = {https://link.springer.com/chapter/10.1007/978-3-319-93354-2_4},
   contact = {Pascal.Hirmer@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-17&engl=0}
}
@inproceedings {INPROC-2018-15,
   author = {Christoph Stach and Sascha Alpers and Stefanie Betz and Frank D{\"u}rr and Andreas Fritsch and Kai Mindermann and Saravana Murthy Palanisamy and Gunther Schiefer and Manuela Wagner and Bernhard Mitschang and Andreas Oberweis and Stefan Wagner},
   title = {{The AVARE PATRON: A Holistic Privacy Approach for the Internet of Things}},
   booktitle = {Proceedings of the 15th International Conference on Security and Cryptography (SECRYPT '18)},
   publisher = {INSTICC Press},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--8},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2018},
   keywords = {Privacy; IoT Apps; Smart Things; Stream Processing; Privacy Preferences Elicitation \& Verification},
   language = {Englisch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues,     D.4.6 Operating Systems Security and Protection},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Softwaretechnologie, Software Engineering;     Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Verteilte Systeme},
   abstract = {Applications for the Internet of Things are becoming increasingly popular. Due to the large amount of available context data, such applications can be used effectively in many domains. By interlinking these data and analyzing them, it is possible to gather a lot of knowledge about a user. Therefore, these applications pose a threat to privacy. In this paper, we illustrate this threat by looking at a real-world application scenario. Current state of the art focuses on privacy mechanisms either for Smart Things or for big data processing systems. However, our studies show that for a comprehensive privacy protection a holistic view on these applications is required. Therefore, we describe how to combine two promising privacy approaches from both categories, namely AVARE and PATRON. Evaluation results confirm the thereby achieved synergy effects.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-15&engl=0}
}
@inproceedings {INPROC-2018-14,
   author = {Corinna Giebler and Christoph Stach and Holger Schwarz and Bernhard Mitschang},
   title = {{BRAID - A Hybrid Processing Architecture for Big Data}},
   booktitle = {Proceedings of the 7th International Conference on Data Science, Technology and Applications (DATA 2018)},
   publisher = {INSTICC Press},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--8},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2018},
   keywords = {Big Data; IoT; Batch Processing; Stream Processing; Lambda Architecture; Kappa Architecture},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     H.2.4 Database Management Systems,     H.2.8 Database Applications},
   contact = {Senden Sie eine e-Mail an Corinna.Giebler@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The Internet of Things is applied in many domains and collects vast amounts of data. This data provides access to a lot of knowledge when analyzed comprehensively. However, advanced analysis techniques such as predictive or prescriptive analytics require access to both history data, i.e., long-term persisted data, and real-time data, as well as a joint view on both types of data. State-of-the-art hybrid processing architectures for big data - namely, the Lambda and the Kappa Architecture - support the processing of history data and real-time data. However, they lack a tight coupling of the two processing modes. That is, the user has to do a lot of work manually in order to enable a comprehensive analysis of the data. For instance, the user has to combine the results of both processing modes or apply knowledge from one processing mode to the other. Therefore, we introduce a novel hybrid processing architecture for big data, called BRAID. BRAID intertwines the processing of history data and real-time data by adding communication channels between the batch engine and the stream engine. This makes it possible to carry out comprehensive analyses automatically at a reasonable overhead.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-14&engl=0}
}
@inproceedings {INPROC-2018-10,
   author = {Jan K{\"o}nigsberger and Bernhard Mitschang},
   title = {{R2SMA - A Middleware Architecture to Access Legacy Enterprise Web Services using Lightweight REST APIs}},
   booktitle = {Proceedings of the 20th International Conference on Enterprise Information Systems},
   editor = {Slimane Hammoudi and Michal Smialek and Olivier Camp and Joaquim Filipe},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {704--711},
   type = {Konferenz-Beitrag},
   month = {M{\"a}rz},
   year = {2018},
   isbn = {978-989-758-298-1},
   keywords = {REST; API; SOAP; Web Service; SOA; Enterprise SOA; Architecture},
   language = {Englisch},
   cr-category = {H.5.4 Hypertext/Hypermedia,     H.3.5 Online Information Services},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-10&engl=0}
}
@inproceedings {INPROC-2018-05,
   author = {Christoph Stach and Bernhard Mitschang},
   title = {{CURATOR - A Secure Shared Object Store: Design, Implementation, and Evaluation of a Manageable, Secure, and Performant Data Exchange Mechanism for Smart Devices}},
   booktitle = {Proceedings of the 33rd ACM/SIGAPP Symposium On Applied Computing (DTTA)},
   publisher = {ACM Press},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--8},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2018},
   keywords = {data exchange; smart devices; shared object store; security},
   language = {Englisch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues,     D.4.6 Operating Systems Security and Protection},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Nowadays, smart devices have become incredibly popular - literally everybody has one. Due to an enormous quantity of versatile apps, these devices positively affect almost every aspect of their users' lives. E.g., there are apps collecting and monitoring health data from a certain domain such as diabetes-related or respiration-related data. However, they cannot realize their full potential since they only have access to their own data and cannot combine it with data from other apps, e.g., in order to create a comprehensive electronic health record. On that account, we introduce a seCURe shAred objecT stORe called CURATOR. In CURATOR, apps can not only manage their own data in an easy and performant way, but also share it with other apps. Since some of the data is confidential, CURATOR has several security features, including authentication, fine-grained access control, and encryption. In this paper, we discuss CURATOR's design and implementation and evaluate its performance.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-05&engl=0}
}
@inproceedings {INPROC-2018-03,
   author = {Christoph Stach and Frank Steimle and Bernhard Mitschang},
   title = {{The Privacy Management Platform: An Enabler for Device Interoperability and Information Security in mHealth Applications}},
   booktitle = {Proceedings of the 11th International Conference on Health Informatics (HEALTHINF 2018)},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--12},
   type = {Konferenz-Beitrag},
   month = {Januar},
   year = {2018},
   keywords = {mHealth; Device Interoperability; Information Security; COPD; Privacy Management Platform},
   language = {Englisch},
   cr-category = {H.5.0 Information Interfaces and Presentation General,     K.6.5 Security and Protection,     K.8 Personal Computing},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Chronic diseases are on the rise. Afflicted patients require persistent therapy and periodic screenings. This causes high treatment costs and overburdened physicians. Innovative approaches that enable patients to perform treatment methods on their own are badly needed. Telemedical approaches with the aid of modern Smartphones connected to medical devices (the so-called mHealth) can be the answer. However, mHealth apps face two key challenges, namely device interoperability and information security. In this paper, we describe how the Privacy Management Platform (PMP) and its extendable Resources can help address these challenges. To this end, we analyze a real-world mHealth app and derive generic functional units, each realizing a certain task recurring frequently within mHealth apps, e.g., metering, data storage, or data transmission. For each functional unit we provide a PMP Resource, enabling both device interoperability and information security. Finally, we revise the analyzed mHealth app using the Resources in order to evaluate our approach.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-03&engl=0}
}
@inproceedings {INPROC-2018-01,
   author = {Christoph Stach and Bernhard Mitschang},
   title = {{ACCESSORS: A Data-Centric Permission Model for the Internet of Things}},
   booktitle = {Proceedings of the 4th International Conference on Information Systems Security and Privacy (ICISSP 2018).},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--11},
   type = {Konferenz-Beitrag},
   month = {Januar},
   year = {2018},
   keywords = {Permission Model; Data-Centric; Derivation Transparent; Fine-Grained; Context-Sensitive; IoT},
   language = {Englisch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues,     D.4.6 Operating Systems Security and Protection},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The Internet of Things (IoT) is gaining more and more relevance. Due to innovative IoT devices equipped with novel sensors, new application domains come up continuously. These domains include Smart Homes, Smart Health, and Smart Cars among others. As the devices not only collect a lot of data about the user, but also share this information with each other, privacy is a key issue for IoT applications. However, traditional privacy systems cannot be applied to the IoT directly due to different requirements towards the underlying permission models. Therefore, we analyze existing permission models regarding their applicability in the IoT domain. Based on this analysis, we come up with a novel permission model, implement it in a privacy system, and assess its utility.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-01&engl=0}
}
@inproceedings {INPROC-2017-70,
   author = {Jan K{\"o}nigsberger and Bernhard Mitschang},
   title = {{Business Objects plus (BO+): An Approach to Enhance Service Reuse and Integration in Cross-Domain SOA Compounds}},
   booktitle = {Proceedings of the 2017 IEEE International Conference on Information Reuse and Integration},
   editor = {Chengcui Zhang and Balaji Palanisamy and Latifur Khan and Sahra Sedigh Sarvestani},
   address = {Los Alamitos, Washington, Tokyo},
   publisher = {IEEE Computer Society},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {49--58},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {2017},
   doi = {10.1109/IRI.2017.28},
   isbn = {978-1-5386-1562-1},
   keywords = {SOA, data model, abstraction, service interface, business objects},
   language = {Englisch},
   cr-category = {E.2 Data Storage Representations,     H.3.5 Online Information Services},
   ee = {http://ieeexplore.ieee.org/document/8102918/},
   contact = {Jan K{\"o}nigsberger jan.koenigsberger@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-70&engl=0}
}
@inproceedings {INPROC-2017-49,
   author = {Eva Hoos and Matthias Wieland and Bernhard Mitschang},
   title = {{Analysis Method for Conceptual Context Modeling Applied in Production Environments}},
   booktitle = {Proceedings of 20th International Conference on Business Information Systems (BIS)},
   publisher = {Springer International Publishing},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {313--325},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2017},
   keywords = {Context-awareness; production environments; Industry 4.0},
   language = {Englisch},
   cr-category = {J.1 Administration Data Processing},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-49&engl=0}
}
@inproceedings {INPROC-2017-48,
   author = {Eva Hoos and Pascal Hirmer and Bernhard Mitschang},
   title = {{Context-Aware Decision Information Packages: An Approach to Human-Centric Smart Factories}},
   booktitle = {Proceedings of the 21st European Conference on Advances in Databases and Information Systems (ADBIS)},
   publisher = {Springer International Publishing AG 2017},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {42--56},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {2017},
   keywords = {Industry 4.0; Context-awareness; Data Provisioning; Smart Factory},
   language = {Englisch},
   cr-category = {H.3.3 Information Search and Retrieval},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-48&engl=0}
}
@inproceedings {INPROC-2017-41,
   author = {Matthias Wieland and Frank Steimle and Bernhard Mitschang and Dominik Lucke and Peter Einberger and Daniel Schel and Michael Luckert and Thomas Bauernhansl},
   title = {{Rule-Based Integration of Smart Services Using the Manufacturing Service Bus}},
   booktitle = {Proceedings of 14th IEEE International Conference on Ubiquitous Intelligence and Computing (UIC2017)},
   address = {Fremont, USA},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--8},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {2017},
   doi = {10.1109/UIC-ATC.2017.8397529},
   keywords = {Rules; Integration; Manufacturing; Smart Factory; Industrie 4.0; Manufacturing Service Bus},
   language = {Englisch},
   cr-category = {H.4.0 Information Systems Applications General,     I.2.1 Applications and Expert Systems},
   ee = {https://ieeexplore.ieee.org/document/8397529/},
   contact = {Senden Sie eine E-Mail an Frank.Steimle@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Factories have to adapt permanently to changing situations in order to stay competitive. A premise for achieving this objective is up-to-date information on all levels of a factory and throughout the product life cycle, so that humans and machines can optimize their activities according to their tasks. One approach to implement this economically is the massive application of sensors and information and communication technologies (ICT), leading to a Smart Factory. This process and related applications are summarized under the term of the fourth industrial revolution (Industrie 4.0). It demands a flexible and easy-to-use integration of assets on the shop floor with ICT systems. The contribution of this paper is the MIALinx system that enables all these steps. The steps range from integration, through sensing and analyzing the sensor data, to the execution of required actions. Furthermore, MIALinx provides an abstract rule-based approach for users to model the behavior of the system. The presented system is based on concepts and technologies of the Internet of Things and service-oriented middleware. The main users targeted with our system are small and medium-sized enterprises that do not have the expertise or the investment possibilities to invest in completely new Industrie 4.0 systems, but rather use their existing production assets and enrich them to achieve Industrie 4.0 capability.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-41&engl=0}
}
@inproceedings {INPROC-2017-40,
   author = {Eva Hoos and Pascal Hirmer and Bernhard Mitschang},
   title = {{Improving Problem Resolving on the Shop Floor by Context-Aware Decision Information Packages}},
   booktitle = {Proceedings of the CAiSE 2017 Forum},
   editor = {Xavier Franch and Jolita Ralyt{\'e}},
   address = {Essen},
   publisher = {CEUR Workshop Proceedings},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {121--128},
   type = {Workshop-Beitrag},
   month = {Juni},
   year = {2017},
   keywords = {Industry 4.0; Context-Awareness; Engineering},
   language = {Englisch},
   cr-category = {J.1 Administration Data Processing},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-40&engl=0}
}
@inproceedings {INPROC-2017-38,
   author = {Christian Weber and Jan K{\"o}nigsberger and Laura Kassner and Bernhard Mitschang},
   title = {{M2DDM – A Maturity Model for Data-Driven Manufacturing}},
   booktitle = {Manufacturing Systems 4.0 – Proceedings of the 50th CIRP Conference on Manufacturing Systems (CIRP CMS); Taichung, Taiwan, May 3-5, 2017},
   editor = {Mitchell M. Tseng and Hung-Yin Tsai and Yue Wang},
   publisher = {Elsevier},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Procedia CIRP},
   volume = {63},
   pages = {173--178},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2017},
   doi = {https://doi.org/10.1016/j.procir.2017.03.309},
   issn = {2212-8271},
   keywords = {Maturity Model; Industrie 4.0; Industrial Internet; Reference Architectures; Digital Twin; Edge Analytics},
   language = {Englisch},
   cr-category = {H.1.0 Information Systems Models and Principles General,     H.4.0 Information Systems Applications General},
   ee = {http://www.sciencedirect.com/science/article/pii/S2212827117304973},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-38&engl=0}
}
@inproceedings {INPROC-2017-32,
   author = {Christoph Stach and Frank D{\"u}rr and Kai Mindermann and Saravana Murthy Palanisamy and Muhammad Adnan Tariq and Bernhard Mitschang and Stefan Wagner},
   title = {{PATRON - Datenschutz in Datenstromverarbeitungssystemen}},
   booktitle = {Informatik 2017: Digitale Kulturen, Tagungsband der 47. Jahrestagung der Gesellschaft f{\"u}r Informatik e.V. (GI), 25.09. - 29.09.2017, Technische Universit{\"a}t Chemnitz},
   publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {LNI},
   pages = {1--12},
   type = {Workshop-Beitrag},
   month = {September},
   year = {2017},
   keywords = {Datenschutz; Zugriffskontrolle; Datenstr{\"o}me; Internet der Dinge; Privatheit; Sensoren},
   language = {Deutsch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues,     D.4.6 Operating Systems Security and Protection},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Softwaretechnologie, Software Engineering;     Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Verteilte Systeme},
   abstract = {Angetrieben durch die stetig voranschreitende Digitalisierung gewinnt das Internet der Dinge (engl. IoT) immer mehr an Bedeutung. Im IoT werden technische Ger{\"a}te mit unterschiedlichen Sensoren ausgestattet und miteinander vernetzt. Dadurch werden neuartige Anwendungen beispielsweise im Bereich E-Health erm{\"o}glicht, in denen Sensordaten miteinander kombiniert und so in h{\"o}herwertige Informationen umgewandelt werden. Die von diesen Anwendungen abgeleiteten Informationen verraten viel {\"u}ber den Nutzer und m{\"u}ssen daher besonders gesch{\"u}tzt werden. H{\"a}ufig hat der Nutzer allerdings keine Kontrolle {\"u}ber die Verarbeitung seiner Daten, ganz davon zu schweigen, dass er das Ausma{\ss} und die Art der daraus ableitbaren Informationen nicht ermessen kann. In diesem Artikel stellen wir daher einen neuartigen Kontrollmechanismus vor, der private Informationen im IoT sch{\"u}tzt. Anstelle von abstrakten Datenschutzregeln f{\"u}r einzelne Sensoren definiert der Nutzer Muster, die es zu sch{\"u}tzen gilt. Ein Muster kann beispielsweise eine Kombination aus Messwerten sein, die auf eine bestimmte Krankheit schlie{\ss}en lassen. Der Nutzer definiert die zu verheimlichenden Informationen nat{\"u}rlichsprachlich, und ein Dom{\"a}nenexperte setzt diese in formale Regeln um. Sind diese Regeln zu restriktiv, so kann die Anwendung ihre angedachte Funktionalit{\"a}t nicht erbringen. Daher muss bez{\"u}glich der Servicequalit{\"a}t ein Kompromiss zwischen gew{\"u}nschter Privatheit und ben{\"o}tigter Funktionalit{\"a}t gefunden werden.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-32&engl=0}
}
@inproceedings {INPROC-2017-28,
   author = {Ana Cristina Franco da Silva and Uwe Breitenb{\"u}cher and Pascal Hirmer and K{\'a}lm{\'a}n K{\'e}pes and Oliver Kopp and Frank Leymann and Bernhard Mitschang and Ronald Steinke},
   title = {{Internet of Things Out of the Box: Using TOSCA for Automating the Deployment of IoT Environments}},
   booktitle = {Proceedings of the 7th International Conference on Cloud Computing and Services Science (CLOSER)},
   editor = {Donald Ferguson and V{\'\i}ctor M{\'e}ndez Mu{\~n}oz and Jorge Cardoso and Markus Helfert and Claus Pahl},
   publisher = {SciTePress Digital Library},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {ScitePress},
   volume = {1},
   pages = {358--367},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2017},
   isbn = {978-989-758-243-1},
   doi = {10.5220/0006243303580367},
   keywords = {Internet of Things; TOSCA; Application Deployment; Device Software},
   language = {Englisch},
   cr-category = {K.6 Management of Computing and Information Systems,     D.2.12 Software Engineering Interoperability},
   ee = {http://scitepress.org/DigitalLibrary/PublicationsDetail.aspx?ID=AuNrRtS4cNc=&t=1},
   contact = {franco-da-silva@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-28&engl=0}
}
@inproceedings {INPROC-2017-13,
   author = {Marc H{\"u}ffmeyer and Pascal Hirmer and Bernhard Mitschang and Ulf Schreier and Matthias Wieland},
   title = {{SitAC – A System for Situation-aware Access Control - Controlling Access to Sensor Data}},
   booktitle = {Proceedings of the 3rd International Conference on Information Systems Security and Privacy},
   editor = {Paolo Mori and Steven Furnell and Olivier Camp},
   address = {Porto, Portugal},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {ScitePress},
   volume = {1},
   pages = {113--125},
   type = {Konferenz-Beitrag},
   month = {M{\"a}rz},
   year = {2017},
   isbn = {978-989-758-209-7},
   keywords = {Authorization; Attribute based Access Control; Situation-awareness; REST; Internet of Things},
   language = {Englisch},
   cr-category = {J.6 Computer-Aided Engineering,     H.3.1 Content Analysis and Indexing},
   ee = {http://www.scitepress.org/DigitalLibrary/PublicationsDetail.aspx?ID=PZW1ep7OUUk%3d&t=1},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-13&engl=0}
}
@inproceedings {INPROC-2017-05,
   author = {Laura Kassner and Pascal Hirmer and Matthias Wieland and Frank Steimle and Jan K{\"o}nigsberger and Bernhard Mitschang},
   title = {{The Social Factory: Connecting People, Machines and Data in Manufacturing for Context-Aware Exception Escalation}},
   booktitle = {Proceedings of the 50th Hawaii International Conference on System Sciences},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--10},
   type = {Konferenz-Beitrag},
   month = {Januar},
   year = {2017},
   isbn = {978-0-9981331-0-2},
   keywords = {decision support; internet of things; smart manufacturing; social media; text analytics},
   language = {Englisch},
   cr-category = {E.0 Data General,     H.2 Database Management,     H.3 Information Storage and Retrieval,     H.4 Information Systems Applications},
   ee = {http://hdl.handle.net/10125/41355},
   contact = {pascal.hirmer@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Manufacturing environments are socio-technical systems where people have to interact with machines to achieve a common goal. The goal of the fourth industrial revolution is to improve their flexibility for mass customization and rapidly changing production conditions. As a contribution towards this goal, we introduce the Social Factory: a social network with a powerful analytics backend to improve the connection between the persons working in the production environment, the manufacturing machines, and the data that is created in the process. We represent machines, people and chatbots for information provisioning as abstract users in the social network. We enable natural language based communication between them and provide a rich knowledge base and automated problem solution suggestions. Access to complex production environments thus becomes intuitive, cooperation among users improves and problems are resolved more easily.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-05&engl=0}
}
@inproceedings {INPROC-2017-04,
   author = {Matthias Wieland and Pascal Hirmer and Frank Steimle and Christoph Gr{\"o}ger and Bernhard Mitschang and Eike Rehder and Dominik Lucke and Omar Abdul Rahman and Thomas Bauernhansl},
   title = {{Towards a Rule-Based Manufacturing Integration Assistant}},
   booktitle = {Proceedings of the 49th CIRP Conference on Manufacturing Systems (CIRP-CMS 2016); Stuttgart, Germany, May 25-27, 2016},
   editor = {Engelbert Westk{\"a}mper and Thomas Bauernhansl},
   publisher = {Elsevier},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Procedia CIRP},
   volume = {57},
   pages = {213--218},
   type = {Konferenz-Beitrag},
   month = {Januar},
   year = {2017},
   doi = {10.1016/j.procir.2016.11.037},
   keywords = {Rules; Integration; Manufacturing; Smart-Factory; Industrie 4.0},
   language = {Englisch},
   cr-category = {H.4.0 Information Systems Applications General,     J.2 Physical Sciences and Engineering,     I.2.1 Applications and Expert Systems,     I.2.4 Knowledge Representation Formalisms and Methods},
   ee = {http://www.sciencedirect.com/science/article/pii/S221282711631191X},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Recent developments and steadily declining prices in ICT enable an economic application of advanced digital tools in wide areas of manufacturing. Solutions based on concepts and technologies of the Internet of Things or Cyber Physical Systems can be used to implement monitoring as well as self-organization of production, maintenance or logistics processes. However, integration of new digital tools in existing heterogeneous manufacturing IT systems and integration of machines and devices into manufacturing environments is an expensive and tedious task. Therefore, integration issues on IT and manufacturing level significantly prevent agile manufacturing. Especially small and medium-sized enterprises do not have the expertise or the investment possibilities to realize such an integration. To tackle this issue, we present the approach of the Manufacturing Integration Assistant - MIALinx. The objective is to develop and implement a lightweight and easy-to-use integration solution for small and medium-sized enterprises based on recent web automation technologies. MIALinx aims to simplify the integration using simple programmable, flexible and reusable IF-THEN rules that connect occurring situations in manufacturing, such as a machine breakdown, with corresponding actions, e.g., an automatic maintenance order generation. For this purpose, MIALinx connects sensors and actuators based on defined rules whereas the rule set is defined in a domain-specific, easy-to-use manner to enable rule modeling by domain experts. Through the definition of rule sets, the workers' knowledge can also be externalized. Using manufacturing-approved cloud computing technologies, we enable robustness, security, and a low-effort, low-cost integration of MIALinx into existing manufacturing environments to provide advanced digital tools also for small and medium-sized enterprises.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-04&engl=0}
}
@inproceedings {INPROC-2016-35,
   author = {Jan K{\"o}nigsberger and Bernhard Mitschang},
   title = {{A Semantically-enabled SOA Governance Repository}},
   booktitle = {Proceedings of the 2016 IEEE 17th International Conference on Information Reuse and Integration},
   editor = {IEEE Computer Society},
   address = {Los Alamitos, California, Washington, Tokyo},
   publisher = {IEEE Computer Society Conference Publishing Services},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {423--432},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {2016},
   isbn = {978-1-5090-3207-5},
   keywords = {SOA; Governance; Repository; Semantic Web},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     H.3.5 Online Information Services,     I.2.4 Knowledge Representation Formalisms and Methods},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Companies in today's world need to cope with an ever greater need for flexible and agile IT systems to keep up with the competition and rapidly changing markets. This leads to increasingly complex system landscapes that are often realized using service-oriented architectures (SOA). Companies often struggle to handle the complexity and the governance activities necessary after this paradigm shift. We therefore present a semantically-enabled SOA Governance Repository as the central tool to manage and govern all SOA-related activities within a company. This repository is based on our previously defined key governance aspects as well as our SOA Governance Meta Model (SOA-GovMM). We describe how our repository is able to support and improve the speed and flexibility of a company's IT processes.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-35&engl=0}
}
@inproceedings {INPROC-2016-25,
   author = {Pascal Hirmer and Matthias Wieland and Uwe Breitenb{\"u}cher and Bernhard Mitschang},
   title = {{Dynamic Ontology-based Sensor Binding}},
   booktitle = {Advances in Databases and Information Systems. 20th East European Conference, ADBIS 2016, Prague, Czech Republic, August 28-31, 2016, Proceedings},
   address = {Prague, Czech Republic},
   publisher = {Springer International Publishing},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Information Systems and Applications, incl. Internet/Web, and HCI},
   volume = {9809},
   pages = {323--337},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {2016},
   isbn = {978-3-319-44039-2},
   isbn = {978-3-319-44038-5},
   doi = {10.1007/978-3-319-44039-2},
   keywords = {Internet of Things; Sensors; Ontologies; Data Provisioning},
   language = {Englisch},
   cr-category = {E.0 Data General,     B.8 Performance and Reliability},
   ee = {http://www.springer.com/de/book/9783319440385},
   contact = {pascal.hirmer@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {In recent years, the Internet of Things has gained more and more attention through cheap hardware devices and, consequently, their increased interconnection. These devices equipped with sensors and actuators form the foundation for so-called smart environments that enable monitoring as well as self-organization. However, an efficient sensor registration, binding, and sensor data provisioning is still a major issue for the Internet of Things. Usually, these steps can take up to days or even weeks due to manual configuration and binding by sensor experts, who furthermore have to communicate with domain experts who define the requirements, e.g., the types of sensors, for the smart environments. In previous work, we introduced a first vision of a method for automated sensor registration, binding, and sensor data provisioning. In this paper, we further detail and extend this vision, e.g., by introducing optimization steps to enhance efficiency as well as effectiveness. Furthermore, the approach is evaluated through a prototypical implementation.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-25&engl=0}
}
@inproceedings {INPROC-2016-22,
   author = {Pascal Hirmer and Matthias Wieland and Uwe Breitenb{\"u}cher and Bernhard Mitschang},
   title = {{Automated Sensor Registration, Binding and Sensor Data Provisioning}},
   booktitle = {Proceedings of the CAiSE'16 Forum, at the 28th International Conference on Advanced Information Systems Engineering (CAiSE 2016)},
   address = {Ljubljana, Slovenia},
   publisher = {CEUR-WS.org},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {CEUR Workshop Proceedings},
   volume = {1612},
   pages = {81--88},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2016},
   issn = {1613-0073},
   keywords = {Internet of Things; Sensors; Ontologies; Data Provisioning},
   language = {Englisch},
   cr-category = {J.6 Computer-Aided Engineering,     H.3.1 Content Analysis and Indexing},
   ee = {http://ceur-ws.org/Vol-1612/paper11.pdf},
   contact = {pascal.hirmer@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Today, the Internet of Things has evolved due to an increasing interconnection of technical devices. However, the automated binding and management of things and sensors is still a major issue. In this paper, we present a method and system architecture for sensor registration, binding, and sensor data provisioning. This approach enables automated sensor integration and data processing by accessing the sensors and provisioning the data. Furthermore, the registration of new sensors is done in an automated way to avoid a complex, tedious manual registration. We enable (i) semantic description of sensors and things as well as their attributes using ontologies, (ii) the registration of sensors of a physical thing, (iii) a provisioning of sensor data using different data access paradigms, and (iv) dynamic sensor binding based on application requirements. We provide the Resource Management Platform as a prototypical implementation of the architecture and corresponding runtime measurements.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-22&engl=0}
}
@inproceedings {INPROC-2016-09,
   author = {Christoph Stach and Bernhard Mitschang},
   title = {{The Secure Data Container: An Approach to Harmonize Data Sharing with Information Security}},
   booktitle = {Proceedings of the 17th International Conference on Mobile Data Management},
   address = {Porto},
   publisher = {IEEE Computer Society Conference Publishing Services},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {292--297},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2016},
   keywords = {smart devices; information security; data sharing},
   language = {Englisch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues,     D.4.6 Operating Systems Security and Protection},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Smart devices became Marc Weiser's Computer of the 21st Century. Due to their versatility a lot of private data enriched by context data are stored on them. Even the health industry utilizes smart devices as portable health monitors and enablers for telediagnosis. So they represent a severe risk for information security. Yet the platform providers' countermeasures to these threats are by no means sufficient. In this paper we describe how information security can be improved. Therefore, we postulate requirements towards a secure handling of data. Based on this requirements specification, we introduce a secure data container as an extension for the Privacy Management Platform. Since a complete isolation of an app is usually not practicable, our approach also provides secure data sharing features. Finally, we evaluate our approach from a technical point of view as well as a security point of view and show its applicability in an eHealth scenario.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-09&engl=0}
}
@inproceedings {INPROC-2016-07,
   author = {Christoph Gr{\"o}ger and Laura Kassner and Eva Hoos and Jan K{\"o}nigsberger and Cornelia Kiefer and Stefan Silcher and Bernhard Mitschang},
   title = {{The Data-Driven Factory. Leveraging Big Industrial Data for Agile, Learning and Human-Centric Manufacturing}},
   booktitle = {Proceedings of the 18th International Conference on Enterprise Information Systems},
   editor = {Slimane Hammoudi and Leszek Maciaszek and Michele M. Missikoff and Olivier Camp and Jose Cordeiro},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {40--52},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2016},
   isbn = {978-989-758-187-8},
   keywords = {IT Architecture, Data Analytics, Big Data, Smart Manufacturing, Industrie 4.0},
   language = {Englisch},
   cr-category = {H.4.0 Information Systems Applications General,     J.2 Physical Sciences and Engineering},
   contact = {Email an Christoph.Groeger@ipvs.uni-stuttgart.de oder laura.kassner@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Global competition in the manufacturing industry is characterized by ever shorter product life cycles, increasing complexity and a turbulent environment. High product quality, continuously improved processes as well as changeable organizational structures constitute central success factors for manufacturing companies. With the rise of the internet of things and Industrie 4.0, the increasing use of cyber-physical systems as well as the digitalization of manufacturing operations lead to massive amounts of heterogeneous industrial data across the product life cycle. In order to leverage these big industrial data for competitive advantages, we present the concept of the data-driven factory. The data-driven factory enables agile, learning and human-centric manufacturing and makes use of a novel IT architecture, the Stuttgart IT Architecture for Manufacturing (SITAM), overcoming the insufficiencies of the traditional information pyramid of manufacturing. We introduce the SITAM architecture and discuss its conceptual components with respect to service-oriented integration, advanced analytics and mobile information provisioning in manufacturing. Moreover, for evaluation purposes, we present a prototypical implementation of the SITAM architecture as well as a real-world application scenario from the automotive industry to demonstrate the benefits of the data-driven factory.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-07&engl=0}
}
@inproceedings {INPROC-2016-06,
   author = {Laura Kassner and Bernhard Mitschang},
   title = {{Exploring Text Classification for Messy Data: An Industry Use Case for Domain-Specific Analytics}},
   booktitle = {Advances in Database Technology - EDBT 2016, 19th International Conference on Extending Database Technology, Bordeaux, France, March 15-16, Proceedings},
   publisher = {OpenProceedings.org},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {491--502},
   type = {Konferenz-Beitrag},
   month = {M{\"a}rz},
   year = {2016},
   isbn = {978-3-89318-070-7},
   keywords = {recommendation system; automotive; text analytics; domain-specific language; automatic classification},
   language = {Englisch},
   cr-category = {H.3.1 Content Analysis and Indexing,     H.3.3 Information Search and Retrieval,     H.4.2 Information Systems Applications Types of Systems,     J.1 Administration Data Processing},
   ee = {http://openproceedings.org/2016/conf/edbt/paper-52.pdf},
   contact = {Email an laura.kassner@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Industrial enterprise data present classification problems which are different from those problems typically discussed in the scientific community -- with larger numbers of classes and with domain-specific, often unstructured data. We address one such problem through an analytics environment which makes use of domain-specific knowledge. Companies are beginning to use analytics on large amounts of text data which they have access to, but in day-to-day business, manual effort is still the dominant method for processing unstructured data. In the face of ever larger amounts of data, faster innovation cycles and higher product customization, human experts need to be supported in their work through data analytics. In cooperation with a large automotive manufacturer, we have developed a use case in the area of quality management for supporting human labor through text analytics: When processing damaged car parts for quality improvement and warranty handling, quality experts have to read text reports and assign error codes to damaged parts. We design and implement a system to recommend likely error codes based on the automatic recognition of error mentions in textual quality reports. In our prototypical implementation, we test several methods for filtering out accurate recommendations for error codes and develop further directions for applying this method to a competitive business intelligence use case.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-06&engl=0}
}
@inproceedings {INPROC-2015-62,
   author = {Eva Maria Grochowski and Eva Hoos and Stefan Waitzinger and Dieter Spath and Bernhard Mitschang},
   title = {{Web-based collaboration system for interdisciplinary and interorganizational development teams: case study}},
   booktitle = {Proceeding of the 23rd International Conference on Production Research},
   publisher = {-},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--11},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {2015},
   keywords = {Collaboration; Web-based Platform},
   language = {Englisch},
   cr-category = {J.1 Administration Data Processing},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The automotive industry faces three major challenges – shortage of fossil fuels, politics of global warming and rising competition. In order to remain competitive, companies have to develop more efficient and alternative fuel vehicles. Out of these challenges, new cooperation models become inevitable. The development of complex products like automobiles requires skills from various disciplines, e.g., engineering and IT. Furthermore, these skills are spread all over various companies within the supply chain and beyond. Hence, supporting IT systems for collaborative, innovative work is absolutely essential. Interdisciplinary and interorganizational development has new demands on information systems. These demands are not well analyzed at the moment and therefore, existing collaboration platforms cannot address them. In order to determine these new requirements and show the gap to existing collaboration platforms, we performed a case study. In this case study, we analyze the research campus “Active Research Environment for the Next Generation of Automobiles” (ARENA2036). It is a new cooperation form, where diverse partners from industry, research institutes and universities collaboratively elaborate future topics in the field of production and lightweight construction under “one single roof”. We focus on the special needs of the interdisciplinary, interorganizational partners. The requirements were polled by a questionnaire. About 80 percent of the active research workers in ARENA2036 answered the questionnaire. From the answers we can identify the special needs and also the role profiles of the collaborators. The resulting role profiles specify the personal requirements. These are used for an evaluation of existing information platforms. The deficits between the offered features and the demands of the partners, as well as new technologies supporting the individual needs of users are the foundation for the information system concept for ARENA2036. In our findings we present a role-based view on requirements for the development of an information system for collaboration and cooperation. Based on these requirements we then develop a concept for mobile apps with focus on a role-based design.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-62&engl=0}
}
@inproceedings {INPROC-2015-34,
   author = {Pascal Hirmer and Matthias Wieland and Holger Schwarz and Bernhard Mitschang and Uwe Breitenb{\"u}cher and Frank Leymann},
   title = {{SitRS - A Situation Recognition Service based on Modeling and Executing Situation Templates}},
   booktitle = {Proceedings of the 9th Symposium and Summer School On Service-Oriented Computing},
   editor = {Johanna Barzen and Rania Khalaf and Frank Leymann and Bernhard Mitschang},
   publisher = {IBM Research Report},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Technical Paper},
   volume = {RC25564},
   pages = {113--127},
   type = {Konferenz-Beitrag},
   month = {Dezember},
   year = {2015},
   keywords = {Situation Recognition, IoT, Context, Integration, Cloud Computing, OSLC},
   language = {Englisch},
   cr-category = {J.6 Computer-Aided Engineering,     H.3.1 Content Analysis and Indexing},
   ee = {http://domino.research.ibm.com/library/cyberdig.nsf/papers/656B934403848E8A85257F1D00695A63},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Today, the Internet of Things has evolved due to an advanced connectivity of physical objects. Furthermore, Cloud Computing gains more and more interest for the provisioning of services. In this paper, we want to further improve the integration of these two areas by providing a cloud-based situation recognition service – SitRS. This service can be used to integrate real world objects – the things – into the internet by deriving their situational state based on sensors. This enables context-aware applications to detect events in a smart environment. SitRS is a basic service enabling a generic and easy implementation of Smart* applications such as SmartFactorys, SmartCities, SmartHomes. This paper introduces an approach containing a method and a system architecture for the realization of such a service. The core steps of the method are: (i) registration of the sensors, (ii) modeling of the situation, and (iii) execution of the situation recognition. Furthermore, a prototypical implementation of SitRS is presented and evaluated via runtime measurements.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-34&engl=0}
}
@inproceedings {INPROC-2015-33,
   author = {Pascal Hirmer and Peter Reimann and Matthias Wieland and Bernhard Mitschang},
   title = {{Extended Techniques for Flexible Modeling and Execution of Data Mashups}},
   booktitle = {Proceedings of the 4th International Conference on Data Management Technologies and Applications (DATA)},
   editor = {Markus Helfert and Andreas Holzinger and Orlando Belo and Chiara Francalanci},
   address = {Colmar},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {111--122},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2015},
   isbn = {978-989-758-103-8},
   keywords = {Data Mashups, Ad-hoc Integration, Patterns, Data Flow},
   language = {Englisch},
   cr-category = {E.1 Data Structures,     E.5 Data Files},
   contact = {pascal.hirmer@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Today, a multitude of highly-connected applications and information systems hold, consume and produce huge amounts of heterogeneous data. The overall amount of data is even expected to dramatically increase in the future. In order to conduct, e.g., data analysis, visualizations or other value-adding scenarios, it is necessary to integrate specific, relevant parts of data into a common source. Due to oftentimes changing environments and dynamic requests, this integration has to support ad-hoc and flexible data processing capabilities. Furthermore, an iterative and explorative trial-and-error integration based on different data sources has to be possible. To cope with these requirements, several data mashup platforms have been developed in the past. However, existing solutions are mostly non-extensible, monolithic systems or applications with many limitations regarding the mentioned requirements. In this paper, we introduce an approach that copes with these issues (i) by the introduction of patterns to enable decoupling from implementation details, (ii) by a cloud-ready approach to enable availability and scalability, and (iii) by a high degree of flexibility and extensibility that enables the integration of heterogeneous data as well as dynamic (un-)tethering of data sources. We evaluate our approach using runtime measurements of our prototypical implementation.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-33&engl=0}
}
@inproceedings {INPROC-2015-25,
   author = {Frank Steimle and Matthias Wieland and Bernhard Mitschang and Sebastian Wagner and Frank Leymann},
   title = {{Design and Implementation Issues of a Secure Cloud-Based Health Data Management System}},
   booktitle = {Proceedings of the 9th Symposium and Summer School On Service-Oriented Computing},
   editor = {Johanna Barzen and Rania Khalaf and Frank Leymann and Bernhard Mitschang},
   publisher = {IBM Research Report},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Technical Paper},
   volume = {RC25564},
   pages = {68--82},
   type = {Konferenz-Beitrag},
   month = {Dezember},
   year = {2015},
   keywords = {eHealth; mHealth; cloud data; data analysis; security},
   language = {Englisch},
   cr-category = {C.2.4 Distributed Systems,     H.2.8 Database Applications,     J.3 Life and Medical Sciences},
   ee = {http://domino.research.ibm.com/library/cyberdig.nsf/papers/656B934403848E8A85257F1D00695A63},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {eHealth is gaining more and more interest since many end-user devices that support health data capturing are available. The captured data has to be managed and securely stored in order to access it from different devices and to share it with other users such as physicians. The aim of the German-Greek research project ECHO is to support the treatment of patients who suffer from Chronic Obstructive Pulmonary Disease (COPD), a chronic respiratory disease. Usually, the patients need to be examined by their physicians on a regular basis due to their chronic condition. Since this is very time-consuming and expensive, we develop an eHealth system which allows the physician to monitor patients' conditions remotely, e.g., via smartphones. Therefore, a secure data processing and sharing eHealth platform is required. In this paper, we introduce a health data model and a corresponding platform architecture for the management and analysis of the data provided by the patients. Furthermore, we show how the security of the data is ensured and we explain how the platform can be hosted in a cloud-based environment using the OASIS standard TOSCA, which enables a self-contained and portable description and management of cloud services.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-25&engl=0}
}
@inproceedings {INPROC-2015-15,
   author = {Laura Kassner and Bernhard Mitschang},
   title = {{MaXCept – Decision Support in Exception Handling through Unstructured Data Integration in the Production Context. An Integral Part of the Smart Factory.}},
   booktitle = {Proceedings of the 48th Hawaii International Conference on System Sciences: HICSS 48, 2015},
   publisher = {IEEE Computer Society},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1007--1016},
   type = {Konferenz-Beitrag},
   month = {Januar},
   year = {2015},
   keywords = {smart manufacturing; industrial internet; unstructured data; data integration; exception escalation; expert search},
   language = {Englisch},
   cr-category = {H.4.0 Information Systems Applications General,     J.1 Administration Data Processing,     J.7 Computers in Other Systems},
   contact = {laura.kassner@gsame.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Today, data from different sources and different phases of the product life cycle are usually analyzed in isolation and with considerable time delay. Real-time integrated analytics is especially beneficial in a production context. We present an architecture for data- and analytics-driven exception escalation in manufacturing and show the advantages of integrating unstructured data.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-15&engl=0}
}
@inproceedings {INPROC-2014-76,
   author = {Peter Reimann and Holger Schwarz and Bernhard Mitschang},
   title = {{A Pattern Approach to Conquer the Data Complexity in Simulation Workflow Design}},
   booktitle = {Proceedings of OnTheMove Federated Conferences and Workshops (OTM), 22nd International Conference on Cooperative Information Systems (CoopIS 2014)},
   editor = {R. Meersman et al.},
   address = {Amantea, Italy},
   publisher = {Springer Berlin Heidelberg},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {LNCS},
   volume = {8841},
   pages = {21--38},
   type = {Konferenz-Beitrag},
   month = {Oktober},
   year = {2014},
   keywords = {Data Provisioning; Data Management Patterns; SIMPL; Simulation Workflow; Simulation Workflow Design; Workflow; Workflow Design},
   language = {Englisch},
   cr-category = {H.2.5 Heterogeneous Databases,     H.2.8 Database Applications,     H.4.1 Office Automation},
   contact = {Peter Reimann Peter.Reimann@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Scientific workflows may be used to enable the collaborative implementation of scientific applications across various domains. Since each domain has its own requirements and solutions for data handling, such workflows often have to deal with a highly heterogeneous data environment. This results in an increased complexity of workflow design. As scientists typically design their scientific workflows on their own, this complexity hinders them from concentrating on their core issue, namely the experiments, analyses, or simulations they conduct. In this paper, we present a novel approach to a pattern-based abstraction support for the complex data management in simulation workflows that goes beyond related work in similar research areas. A pattern hierarchy with different abstraction levels enables a separation of concerns according to the skills of different persons involved in workflow design. The goal is that scientists are no longer obliged to specify low-level details of data management in their workflows. We discuss the advantages of this approach and show to what extent it reduces the complexity of simulation workflow design. Furthermore, we illustrate how to map patterns onto executable workflows. Based on a prototypical implementation of three real-world simulations, we evaluate our approach according to relevant requirements.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-76&engl=0}
}
@inproceedings {INPROC-2014-75,
   author = {Jan K{\"o}nigsberger and Stefan Silcher and Bernhard Mitschang},
   title = {{SOA-GovMM: A Meta Model for a Comprehensive SOA Governance Repository}},
   booktitle = {Proceedings of the 2014 IEEE 15th International Conference on Information Reuse and Integration},
   editor = {James Joshi and Elisa Bertino and Bhavani Thuraisingham and Ling Liu},
   address = {Piscataway, NJ, USA},
   publisher = {IEEE Systems, Man, and Cybernetics Society (SMC)},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {187--194},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {2014},
   isbn = {978-1-4799-5880-1},
   keywords = {Service-Oriented Architecture, SOA Governance, Meta Model, Governance Repository},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     H.3.3 Information Search and Retrieval},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2014-75/INPROC-2014-75.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {In recent years, the paradigm of service-oriented architecture (SOA) has more and more found its way into many organizations. The SOA principles of loosely coupled and reusable services have convinced decision makers in many organizations to start SOA initiatives. Yet, the lack of proper governance mechanisms has doomed many projects to fail. Although some SOA governance frameworks exist, they differ highly in scope and none of them covers the whole spectrum necessary to properly govern a SOA. In this paper we identify and discuss eleven core areas the governance of a SOA has to cover in order to realize the intended benefit in flexibility and agility. We then analyze and evaluate existing SOA governance frameworks with regard to those requirements. Subsequently, we present a meta model composed of four parts: Service Provider, Service Consumer, Organizational Structure and Business Object. We show that those four parts cover all requirements for a comprehensive SOA governance repository. This allows an organization to leverage the information integrated in the repository to better govern its SOA and therefore improve the chances of its success.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-75&engl=0}
}
@inproceedings {INPROC-2014-64,
   author = {Eva Hoos and Christoph Gr{\"o}ger and Bernhard Mitschang},
   title = {{Mobile Apps in Engineering: A Process-Driven Analysis of Business Potentials and Technical Challenges}},
   booktitle = {Proceedings of the 9th CIRP Conference on Intelligent Computation in Manufacturing Engineering (CIRP ICME), 23-25 July, 2014, Capri (Naples), Italy},
   publisher = {CIRP},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2014},
   language = {Englisch},
   cr-category = {H.4.0 Information Systems Applications General,     J.4 Social and Behavioral Sciences,     J.2 Physical Sciences and Engineering},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Mobile apps on smartphones and tablet PCs are increasingly employed in enterprises to optimize business processes, e.g., by eliminating paper-based data collection. With respect to engineering, mobile apps provide a huge potential for increased flexibility and efficiency due to their anywhere and anytime characteristics, e.g., for product testing in the field. However, not every usage of mobile apps is beneficial from a business point of view, and existing apps for engineering represent only rudimentary front-ends for stationary IT systems without an app-oriented redesign. Hence, there are three core challenges to leverage the potential of mobile apps in engineering: (1) identifying value-added app usage scenarios from a process point of view, (2) realizing a task-oriented and context-aware user interface design and (3) mastering technical obstacles in the app implementation. In this paper, we address these challenges by a case-oriented analysis of selected engineering processes in the automotive industry in order to identify engineering tasks suited for the usage of mobile apps. On this basis, we design corresponding engineering apps and analyze their business potentials. Moreover, we derive common technological challenges for the development of engineering apps, e.g., data synchronization aspects, and highlight further research issues.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-64&engl=0}
}
@inproceedings {INPROC-2014-61,
   author = {Marina Bitsaki and Christos Koutras and Georgios Koutras and Frank Leymann and Bernhard Mitschang and Christos Nikolaou and Nikos Siafakas and Steve Strauch and Nikos Tzanakis and Matthias Wieland},
   title = {{An Integrated mHealth Solution for Enhancing Patients' Health Online}},
   booktitle = {Proceedings of the 6th European Conference of the International Federation for Medical and Biological Engineering (MBEC'14)},
   publisher = {International Federation for Medical and Biological Engineering (IFMBE)},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--4},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2014},
   keywords = {Patient monitoring; COPD; ICT application services; Cloud technology; Online services; Mobile applications; Intelligent data mining},
   language = {Englisch},
   cr-category = {C.2.4 Distributed Systems,     H.2.8 Database Applications,     H.4.1 Office Automation,     J.3 Life and Medical Sciences},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2014-61/INPROC-2014-61.pdf},
   contact = {Steve Strauch, http://www.iaas.uni-stuttgart.de/institut/mitarbeiter/strauch},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Lack of time or economic difficulties prevent chronic obstructive pulmonary disease patients from communicating with their physicians, thus inducing exacerbation of their chronic condition and possible hospitalization. In this paper we propose a platform that integrates mobile application technologies and cloud computing to provide regular monitoring of patients and avoidance of medical emergencies.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-61&engl=0}
}
@inproceedings {INPROC-2014-59,
   author = {Laura Kassner and Christoph Gr{\"o}ger and Bernhard Mitschang and Engelbert Westk{\"a}mper},
   title = {{Product Life Cycle Analytics - Next Generation Data Analytics on Structured and Unstructured Data}},
   booktitle = {Proceedings of the 9th CIRP Conference on Intelligent Computation in Manufacturing Engineering - CIRP ICME '14},
   address = {Naples},
   publisher = {Elsevier},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--6},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2014},
   keywords = {analytics, big data, unstructured data, text analytics, product life cycle management, PLM, data warehousing, product life cycle analytics, data integration},
   language = {Englisch},
   cr-category = {H.3.1 Content Analysis and Indexing,     H.3.4 Information Storage and Retrieval Systems and Software,     J.2 Physical Sciences and Engineering,     J.6 Computer-Aided Engineering},
   contact = {Per Mail an laura.kassner@gsame.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Enormous amounts of unstructured data, e. g., emails, failure reports and customer complaints, are abundant around the product life cycle and provide a huge potential for analytics-driven optimization. However, existing analytics approaches on unstructured data are fraught with three major insufficiencies limiting comprehensive business improvement: (1) they focus on isolated data sources from a single life cycle phase – for example, data from a customer relationship management system are mined for frequent complaints without considering manufacturing failure reports related to the same product; (2) they do not make use of structured data for holistic analytics, e. g., to automatically correlate unstructured failure reports with structured performance data of a manufacturing execution system; (3) existing implementations of data integration and analytics components are typically cost-intensive, manual and case-based, without a general framework. To address these issues, we present our Product Life Cycle Analytics (PLCA) approach, a platform and a reference architecture for the holistic integration and analysis of unstructured and structured data from multiple data sources around the product life cycle. For this purpose, we survey structured and unstructured data sources around the product life cycle and discuss limitations of existing analytics approaches like traditional Business Intelligence applications. Moreover, we develop use cases for holistic life-cycle-oriented analytics and give examples based on case study investigations, e. g., for the holistic analysis of unstructured failure reports in the automotive industry. On this basis, we discuss technical requirements and components of our reference architecture, such as a versatile, plug-and-play Natural Language Processing pipeline and mechanisms for linking structured and unstructured data in a holistic data warehouse. Finally, we analyse implementation issues and investigate underlying technologies from the areas of text analytics and data mining in order to evaluate our architecture with respect to the identified use cases.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-59&engl=0}
}
@inproceedings {INPROC-2014-58,
   author = {Christoph Stach and Bernhard Mitschang},
   title = {{Design and Implementation of the Privacy Management Platform}},
   booktitle = {Proceedings of the 15th International Conference on Mobile Data Management},
   address = {Brisbane},
   publisher = {IEEE Computer Society Conference Publishing Services},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--4},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2014},
   keywords = {Android; policy model; implementation strategies},
   language = {Englisch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues,     D.4.6 Operating Systems Security and Protection},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Nowadays, mobile platform vendors have to concern themselves increasingly about how to protect their users' privacy. As Google is less restrictive than their competitors regarding their terms of use for app developers, it is hardly surprising that malware spreads even in Google Play. To make matters worse, in Android every user is responsible for his or her private data and s/he is frequently overwhelmed with this burden because of the fragile Android permission mechanism. Thus, the calls for a customizable, fine-grained, context-based, crash-proof, and intuitive privacy management system are growing louder. To cope with these requests, we introduce the Privacy Management Platform (PMP) and we discuss three alternative implementation strategies for such a system.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-58&engl=0}
}
@inproceedings {INPROC-2014-50,
   author = {Peter Reimann and Holger Schwarz and Bernhard Mitschang},
   title = {{Data Patterns to Alleviate the Design of Scientific Workflows Exemplified by a Bone Simulation}},
   booktitle = {Proceedings of the 26th International Conference on Scientific and Statistical Database Management},
   address = {Aalborg, Denmark},
   publisher = {ACM},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2014},
   keywords = {Data Provisioning; Data Management Patterns; Workflow; SIMPL; Simulation Workflow; BPEL; WS-BPEL},
   language = {Englisch},
   cr-category = {H.2.5 Heterogeneous Databases,     H.2.8 Database Applications,     H.4.1 Office Automation},
   contact = {Peter Reimann Peter.Reimann@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Scientific workflows often have to process huge data sets in a multiplicity of data formats. For that purpose, they typically embed complex data provisioning tasks that transform these heterogeneous data into formats the underlying tools or services can handle. This results in an increased complexity of workflow design. As scientists typically design their scientific workflows on their own, this complexity hinders them from concentrating on their core issue, namely the experiments, analyses, or simulations they conduct. In this paper, we present the core idea of a pattern-based approach to alleviate the design of scientific workflows. This approach is particularly targeted at the needs of scientists. We exemplify and assess the pattern-based design approach by applying it to a complex scientific workflow realizing a real-world simulation of structure changes in bones.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-50&engl=0}
}
@inproceedings {INPROC-2014-49,
   author = {Christoph Gr{\"o}ger and Holger Schwarz and Bernhard Mitschang},
   title = {{The Deep Data Warehouse. Link-based Integration and Enrichment of Warehouse Data and Unstructured Content}},
   booktitle = {Proceedings of the 18th IEEE International Enterprise Distributed Object Computing Conference (EDOC), 01-05 September, 2014, Ulm, Germany},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2014},
   language = {Englisch},
   cr-category = {H.2.7 Database Administration},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Data warehouses are at the core of enterprise IT and enable the efficient storage and analysis of structured data. Besides, unstructured content, e.g., emails and documents, constitutes more than half of the entire enterprise data and contains a lot of implicit knowledge about warehouse entities. Thus, holistic analytics require the integration of structured warehouse data and unstructured content to generate novel insights. These insights can also be used to enrich the integrated data and to create a new basis for further analytics. Existing integration approaches only support a limited range of analytical applications and require the costly adaptation of the warehouse schema. In this paper, we present the Deep Data Warehouse (DeepDWH), a novel type of data warehouse based on the flexible integration and enrichment of warehouse data and unstructured content, addressing the variety challenge of Big Data. It relies on information-rich instance-level links between warehouse elements and content items, which are represented in a graph-oriented structure. Neither adaptations of the existing warehouse nor the design of an overall federated schema are required. We design a conceptual linking model and develop a logical schema for links based on a property graph. As a proof of concept, we present a prototypical implementation of the DeepDWH including a link store based on a graph database.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-49&engl=0}
}
@inproceedings {INPROC-2014-28,
   author = {Christoph Gr{\"o}ger and Holger Schwarz and Bernhard Mitschang},
   title = {{Prescriptive Analytics for Recommendation-based Business Process Optimization}},
   booktitle = {Proceedings of the 17th International Conference on Business Information Systems (BIS), 22-23 May, 2014, Larnaca, Cyprus},
   editor = {Witold Abramowicz and Angelika Kokkinaki},
   publisher = {Springer},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {176},
   pages = {25--37},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2014},
   keywords = {Prescriptive Analytics, Process Optimization, Process Warehouse, Data Mining, Business Intelligence, Decision Support},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Continuously improved business processes are a central success factor for companies. Yet, existing data analytics do not fully exploit the data generated during process execution. Particularly, they miss prescriptive techniques to transform analysis results into improvement actions. In this paper, we present the data-mining-driven concept of recommendation-based business process optimization on top of a holistic process warehouse. It prescriptively generates action recommendations during process execution to avoid a predicted metric deviation. We discuss data mining techniques and data structures for real-time prediction and recommendation generation and present a proof of concept based on a prototypical implementation in manufacturing.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-28&engl=0}
}
@inproceedings {INPROC-2014-14,
   author = {Eva Hoos and Christoph Gr{\"o}ger and Stefan Kramer and Bernhard Mitschang},
   title = {{Improving Business Processes through Mobile Apps - An Analysis Framework to Identify Value-added App Usage Scenarios}},
   booktitle = {Proceedings of the 16th International Conference on Enterprise Information Systems (ICEIS), 27-30 April, 2014, Lisbon, Portugal},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2014},
   keywords = {Business Processes; Analysis Framework; Mobile Application},
   language = {Englisch},
   cr-category = {H.1.1 Systems and Information Theory,     K.6.1 Project and People Management},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Mobile apps offer new possibilities to improve business processes. However, the introduction of mobile apps is typically carried out from a technology point of view. Hence, process improvement from a business point of view is not guaranteed. A methodology for the holistic analysis of business processes with regard to mobile technology is lacking. For this purpose, we present an analysis framework, which comprises a systematic methodology to identify value-added usage scenarios of mobile technology in business processes with a special focus on mobile apps. The framework is based on multi-criteria analysis and portfolio analysis techniques and it is evaluated in a case-oriented investigation in the automotive industry.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-14&engl=0}
}
@inproceedings {INPROC-2014-10,
   author = {Christoph Gr{\"o}ger and Holger Schwarz and Bernhard Mitschang},
   title = {{The Manufacturing Knowledge Repository. Consolidating Knowledge to Enable Holistic Process Knowledge Management in Manufacturing}},
   booktitle = {Proceedings of the 16th International Conference on Enterprise Information Systems (ICEIS), 27-30 April, 2014, Lisbon, Portugal},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2014},
   language = {Englisch},
   cr-category = {H.2.7 Database Administration,     J.1 Administration Data Processing},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The manufacturing industry is faced with strong competition making the companies’ knowledge resources and their systematic management a critical success factor. Yet, existing concepts for the management of process knowledge in manufacturing are characterized by major shortcomings. Particularly, they are either exclusively based on structured knowledge, e. g., formal rules, or on unstructured knowledge, such as documents, and they focus on isolated aspects of manufacturing processes. To address these issues, we present the Manufacturing Knowledge Repository, a holistic repository that consolidates structured and unstructured process knowledge to facilitate knowledge management and process optimization in manufacturing. First, we define requirements, especially the types of knowledge to be handled, e. g., data mining models and text documents. Next, we develop a conceptual repository data model associating knowledge items and process components such as machines and process steps. Furthermore, we discuss implementation issues including storage architecture variants and present both an evaluation of the data model and a proof of concept based on a prototypical implementation.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-10&engl=0}
}
@inproceedings {INPROC-2013-43,
   author = {Tim Waizenegger and Matthias Wieland and Tobias Binz and Uwe Breitenb{\"u}cher and Florian Haupt and Oliver Kopp and Frank Leymann and Bernhard Mitschang and Alexander Nowak and Sebastian Wagner},
   title = {{Policy4TOSCA: A Policy-Aware Cloud Service Provisioning Approach to Enable Secure Cloud Computing}},
   booktitle = {On the Move to Meaningful Internet Systems: OTM 2013 Conferences},
   editor = {Robert Meersman and Herve Panetto and Tharam Dillon and Johann Eder and Zohra Bellahsene and Norbert Ritter and Pieter De Leenheer and Dou Deijing},
   address = {Heidelberg},
   publisher = {Springer Berlin Heidelberg},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Lecture Notes in Computer Science (LNCS)},
   volume = {8185},
   pages = {360--376},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2013},
   isbn = {978-3-642-41029-1},
   doi = {10.1007/978-3-642-41030-7_26},
   keywords = {Cloud Computing, TOSCA, Cloud Service, Cloud Management, Policy-Framework, Security, Green-IT, Sustainable Cloud Service},
   language = {Englisch},
   cr-category = {D.2.7 Software Engineering Distribution, Maintenance, and Enhancement,     D.2.9 Software Engineering Management,     D.2.13 Software Engineering Reusable Software},
   contact = {tim.waizenegger@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {With the growing adoption of Cloud Computing, automated deployment and provisioning systems for Cloud applications are becoming more prevalent. They help to reduce the onboarding costs for new customers as well as the financial impact of managing Cloud Services by automating these previously manual tasks. With the widespread use of such systems, the adoption of a common standard for describing Cloud applications will provide a crucial advantage by enabling reusable and portable applications. TOSCA, a newly published standard by OASIS with broad industry participation, provides this opportunity. Besides the technical requirements of running and managing applications in the cloud, non-functional requirements, like cost, security, and environmental issues, are of special importance when moving towards the automated provisioning and management of Cloud applications. In this paper we demonstrate how non-functional requirements are defined in TOSCA using policies. We propose a mechanism for automatic processing of these formal policy definitions in a TOSCA runtime environment that we have developed based on the proposed architecture of the TOSCA primer. In order to evaluate our approach, we present prototypical implementations of security policies for encrypting databases and for limiting the geographical location of the Cloud servers. We demonstrate how our runtime environment ensures these policies and show how they affect the deployment of the application.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-43&engl=0}
}
@inproceedings {INPROC-2013-38,
   author = {Stefan Silcher and Jan K{\"o}nigsberger and Peter Reimann and Bernhard Mitschang},
   title = {{Cooperative service registries for the service-based Product Lifecycle Management architecture}},
   booktitle = {Proceedings of the 17th IEEE International Conference on Computer Supported Cooperative Work in Design (CSCWD '13)},
   publisher = {IEEE Xplore},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {439--446},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2013},
   isbn = {978-1-4673-6083-8},
   doi = {10.1109/CSCWD.2013.6581003},
   keywords = {Collaborative Product Lifecycle Management; Cooperative Service Registries; Enterprise Service Bus; Service-oriented Architecture},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     D.2.13 Software Engineering Reusable Software,     H.3.4 Information Storage and Retrieval Systems and Software,     J.1 Administration Data Processing},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Product Lifecycle Management (PLM) comprises many different tasks across multiple domains, such as product development and production. Thus, multidisciplinary engineering teams have to collaborate to successfully design and produce products. Nowadays, engineers are supported by many software solutions, which are tailored to the work of each engineer. The problem is the missing or poor integration between these IT solutions, which leads to non-continuous processes and insufficient cooperation. The Service-oriented Architecture (SOA) supports the needed flexible integration of applications based on services and, moreover, an automation and integration of processes via workflows. In previous work, we proposed a service-oriented PLM architecture that provides these benefits and supports continuous processes. Thereby, services of different domains and phases of the product life cycle need to collaborate in a distributed fashion. In this paper, we systematically identify, define and rate representative models for the management of corresponding distributed service registries, which enable an efficient collaboration of services. Based on a prototypical implementation of the best-rated model in a layout redesign scenario, we assess our approach for its suitability in PLM. The selected service registry model provides transparent access to all services of different domains and shows the ease of integrating new applications into the product life cycle. It thereby enables an improved cooperation of engineers across various domains to define cross-domain processes.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-38&engl=0}
}
@inproceedings {INPROC-2013-34,
   author = {Stefan Silcher and Barbara Seeberg and Erich Zahn and Bernhard Mitschang},
   title = {{A Holistic Management Model for Manufacturing Companies and Related IT Support}},
   booktitle = {Procedia CIRP},
   publisher = {CIRP},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {175--180},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2013},
   issn = {2212-8271},
   doi = {http://dx.doi.org/10.1016/j.procir.2013.05.030},
   keywords = {Product Lifecycle Management, Supply Chain Management, Factory Lifecycle Management, Holistic Management Model, IT Integration, Service-oriented Architecture, Enterprise Service Bus},
   language = {Englisch},
   cr-category = {C.1.3 Processor Architectures, Other Architecture Styles,     J.1 Administration Data Processing},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Life cycle and management concepts are a necessity to compete in current turbulent markets. Small- and medium-sized enterprises (SMEs) struggle when realizing such concepts and the corresponding IT support. In this paper, we review different concepts and discuss their similarities and differences. We focus on Product Lifecycle Management (PLM), Supply Chain Management and Factory Lifecycle Management to integrate them into a holistic management model. Subsequently, we extend a service-based PLM architecture to support the holistic management model and thus to continuously support processes. The usage of standardized technologies allows companies, and especially SMEs, to implement this architecture with low costs and effort.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-34&engl=0}
}
@inproceedings {INPROC-2013-32,
   author = {Carlos L{\"u}bbe and Bernhard Mitschang},
   title = {{Holistic Load-Balancing in a Distributed Spatial Cache}},
   booktitle = {Proceedings of the 2013 IEEE 14th International Conference on Mobile Data Management},
   publisher = {IEEE Computer Society},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {267--270},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2013},
   doi = {10.1109/MDM.2013.38},
   keywords = {Caching; Geographic Information Systems; Peer-to-peer},
   language = {Englisch},
   cr-category = {H.2 Database Management},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2013-32/INPROC-2013-32.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {A steadily growing number of people using location-based services (LBS) inflicts massive query loads on the data tier of an LBS. As such queries usually possess considerable overlap, multiple cache nodes collaborating in a distributed spatial cache can provide scalable access to frequently used data. To preserve high throughput throughout the complete execution process, it is necessary to balance the accumulating load among the participating cache nodes. In this work, we identify three key indicators to improve resource utilization during the load-balancing process: data skew, anticipated data access patterns and dynamic load peaks. For this reason, we introduce a comprehensive mathematical model to express the key indicators as probability distribution functions. We fuse the different key indicators into a single holistic distribution model. In the course of this, we devise a methodology that leads from our holistic distribution model to a distributed spatial cache offering improved load-balancing.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-32&engl=0}
}
@inproceedings {INPROC-2013-15,
   author = {Christoph Gr{\"o}ger and Mark Hillmann and Friedemann Hahn and Bernhard Mitschang and Engelbert Westk{\"a}mper},
   title = {{The Operational Process Dashboard for Manufacturing}},
   booktitle = {Proceedings of the 46th CIRP Conference on Manufacturing Systems (CMS2013), 29-31 May, 2013, Sesimbra, Portugal},
   publisher = {Elsevier},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2013},
   language = {Englisch},
   cr-category = {J.1 Administration Data Processing},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Agility is a critical success factor for manufacturers in today’s volatile global environment and requires employees to monitor their performance and react quickly to turbulences. Thus, comprehensive information provisioning on all hierarchy levels is necessary. Yet, existing IT systems, e. g., Manufacturing Execution Systems, scarcely address the information needs of workers on the shop floor level. This causes uncoordinated waiting times, inflexibility and costly communication. To address these issues, we present the Operational Process Dashboard for Manufacturing (OPDM), a mobile dashboard for shop floor workers. We identify process-oriented information needs, develop technical dashboard services and define IT requirements for an implementation.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-15&engl=0}
}
@inproceedings {INPROC-2013-14,
   author = {Christoph Gr{\"o}ger and Stefan Silcher and Engelbert Westk{\"a}mper and Bernhard Mitschang},
   title = {{Leveraging Apps in Manufacturing. A Framework for App Technology in the Enterprise}},
   booktitle = {Proceedings of the 46th CIRP Conference on Manufacturing Systems (CMS2013), 29-31 May, 2013, Sesimbra, Portugal},
   publisher = {Elsevier},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2013},
   language = {Englisch},
   cr-category = {J.1 Administration Data Processing},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Apps on mobile devices like smartphones have become the core of the digital life of consumers. Apps are used, e. g., for shopping or communicating in social networks. Recently, apps have been gaining more and more attention in enterprises as an enabler of agile process optimization. In this article, we discuss the potentials and challenges of exploiting this technology with a focus on the manufacturing industry. We come up with a framework for apps in manufacturing companies and identify major areas that need further investigations to fully leverage apps. Moreover, we present existing and novel apps across the product life cycle.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-14&engl=0}
}
@inproceedings {INPROC-2013-11,
   author = {Christoph Stach and Bernhard Mitschang},
   title = {{Privacy Management for Mobile Platforms - A Review of Concepts and Approaches}},
   booktitle = {Proceedings of the 14th International Conference on Mobile Data Management},
   publisher = {IEEE Computer Society Conference Publishing Services},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--9},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2013},
   keywords = {privacy; profound overview; permission model},
   language = {Englisch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues,     D.4.6 Operating Systems Security and Protection},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The still rising popularity of modern mobile phones results in an increased demand for manifold applications for these devices. As Android OS greatly supports the development and usage of third-party software, there are more and more developers for this platform. However, many of those applications handle private data in a grossly negligent way, which immediately leads to serious privacy concerns. To make matters worse, the current Android permission rules are much too coarse and incomprehensible from the average user's perspective. But even if s/he understands the meaning of the permissions, s/he must either accept all of them or forgo the application. Therefore, we review concepts and approaches towards effective privacy management for mobile platforms. All this is discussed based on the prevailing key players in the mobile market, namely Apple, RIM, Microsoft and Google. As this work has been initiated by Google, we mainly concentrated on Android-based concepts towards customizable privacy management approaches. As a result of our review and taking into account current initiatives and trends in the market, we come up with a novel approach, an implementation architecture and a prototype.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-11&engl=0}
}
@inproceedings {INPROC-2012-37,
   author = {Carlos L{\"u}bbe and Anja Reuter and Bernhard Mitschang},
   title = {{Elastic Load-Balancing in a Distributed Spatial Cache Overlay}},
   booktitle = {Proc. of the 13th International Conference on Mobile Data Management (MDM)},
   address = {Washington, DC, USA},
   publisher = {IEEE Computer Society},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {0--10},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2012},
   keywords = {load-balancing; caching; spatial data; peer-to-peer},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   contact = {carlos.luebbe@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Location-based services (LBS) have gained enormous popularity, which imposes increasing query loads at the data tier of an LBS. Yet, the data access patterns of LBS typically possess high temporal and spatial locality. Therefore, a dedicated spatial cache which provides efficient access to the data currently needed may considerably reduce this load. To ensure high throughput, multiple cache nodes can collaborate in a distributed spatial cache overlay, which balances load among the nodes. However, load-balancing is a non-trivial task in this context, as load spreads unevenly in space and varies notably over time. This requires constant readjustment to shifting hot spots. We present an elastic load-balancing mechanism between cache nodes that is based on the physical model of a particle-spring system. Using spring contraction, nodes instantly form processing clusters in regions with high load and thus can easily work off accumulating queries. Our evaluation shows that our approach quickly adapts to rapidly changing hot spots and thereby ensures high throughput throughout the entire execution.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2012-37&engl=0}
}
@inproceedings {INPROC-2012-36,
   author = {Nazario Cipriani and Christoph Stach and Oliver D{\"o}rler and Bernhard Mitschang},
   title = {{NexusDSS - A System for Security Compliant Processing of Data Streams}},
   booktitle = {Proceedings of the First International Conference on Data Technologies and Applications (DATA 2012)},
   publisher = {SciTePress Digital Library},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--11},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2012},
   language = {Englisch},
   cr-category = {C.2.0 Computer-Communication Networks, General,     K.6.5 Security and Protection,     D.4.6 Operating Systems Security and Protection},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Technological advances in microelectronic and communication technology are increasingly leading to a highly connected environment equipped with sensors producing a continuous flow of context data. The steadily growing amount of sensory context data available enables new application scenarios and drives new processing techniques. The growing pervasion of everyday life with social media and the possibility of interconnecting them with moving objects' traces leads to a growing importance of access control for this kind of data since it concerns privacy issues. The challenge is twofold: first, mechanisms to control data access and data usage must be established, and second, efficient and flexible processing of sensitive data must be supported. In this paper we present a flexible and extensible security framework which provides mechanisms to enforce requirements for context data access and, beyond that, supports safe processing of sensitive context data according to predefined processing rules. In addition, and in contrast to previous concepts, our security framework especially supports fine-grained control of contextual data.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2012-36&engl=0}
}
@inproceedings {INPROC-2012-31,
   author = {Christoph Gr{\"o}ger and Johannes Schlaudraff and Florian Niedermann and Bernhard Mitschang},
   title = {{Warehousing Manufacturing Data. A Holistic Process Warehouse for Advanced Manufacturing Analytics}},
   booktitle = {Proceedings of the 14th International Conference on Data Warehousing and Knowledge Discovery - DaWaK 2012},
   editor = {Alfredo Cuzzocrea and Umeshwar Dayal},
   address = {Berlin, Heidelberg},
   publisher = {Springer},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Lecture Notes in Computer Science},
   volume = {7448},
   pages = {142--155},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2012},
   keywords = {Data Warehouse; Manufacturing; Process Optimization; Analytics; Business Intelligence; Data Integration},
   language = {Englisch},
   cr-category = {H.2.7 Database Administration,     J.1 Administration Data Processing},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Strong competition in the manufacturing industry makes efficient and effective manufacturing processes a critical success factor. However, existing warehousing and analytics approaches in manufacturing are characterized by substantial shortcomings, significantly preventing comprehensive process improvement. In particular, they miss a holistic data base integrating operational and process data, e. g., from Manufacturing Execution and Enterprise Resource Planning systems. To address this challenge, we introduce the Manufacturing Warehouse, a concept for a holistic manufacturing-specific process warehouse as central part of the overall Advanced Manufacturing Analytics Platform. We define a manufacturing process meta model and deduce a universal warehouse model. In addition, we develop a procedure for its instantiation and the integration of concrete source data. Finally, we describe a first proof of concept based on a prototypical implementation.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2012-31&engl=0}
}
@inproceedings {INPROC-2012-28,
   author = {Stefan Silcher and Max Dinkelmann and Jorge Minguez and Bernhard Mitschang},
   title = {{A Service-based Integration for an improved Product Lifecycle Management}},
   booktitle = {Proceedings of the 14th International Conference on Enterprise Information Systems},
   editor = {Alfredo Cuzzocrea and Jos{\'e} Cordeiro Leszek Maciaszek},
   address = {Wroc{\l}aw, Poland},
   publisher = {INSTICC Press},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {38--47},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2012},
   isbn = {978-989-8565-10-5},
   keywords = {Product Lifecycle Management; Service-oriented Architecture; Modular IT Integration},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     D.2.13 Software Engineering Reusable Software},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The continuously changing environment is nowadays a major challenge for companies. Tough competition, the growing customization of products and environmental regulations force companies to continuously adapt their business processes. In order to manage the complexity and reduce the effort for developing products and production, many IT systems are indispensable. Despite Product Lifecycle Management (PLM) technology, the growing heterogeneous IT landscapes lack continuous support for business processes and quickly become unmanageable. In this paper, PLM technology is extended by a service-based integration approach. To this end, a modular service-based architecture was developed, which is presented in detail. The architecture describes how the whole product life cycle can be integrated more efficiently. The characteristics and findings of our approach are presented as well as a first prototype covering the production planning.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2012-28&engl=0}
}
@inproceedings {INPROC-2012-15,
   author = {Christoph Gr{\"o}ger and Florian Niedermann and Bernhard Mitschang},
   title = {{Data Mining-driven Manufacturing Process Optimization}},
   booktitle = {Proceedings of the World Congress on Engineering 2012 Vol III, WCE 2012, 4 – 6 July, 2012, London, U.K.},
   editor = {S. I. Ao and L. Gelman and D. W. L. Hukins and A. Hunter and A. M. Korsunsky},
   publisher = {Newswood},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1475--1481},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2012},
   isbn = {978-988-19252-2-0},
   keywords = {Analytics; Data Mining; Decision Support; Process Optimization},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications,     J.1 Administration Data Processing},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {High competitive pressure in the global manufacturing industry makes efficient, effective and continuously improved manufacturing processes a critical success factor. Yet, existing analytics in manufacturing, e. g., provided by Manufacturing Execution Systems, are characterized by major shortcomings that considerably limit continuous process improvement. In particular, they do not make use of data mining to identify hidden patterns in manufacturing-related data. In this article, we present indication-based and pattern-based manufacturing process optimization as novel data mining approaches provided by the Advanced Manufacturing Analytics Platform. We demonstrate their usefulness through use cases and depict suitable data mining techniques as well as implementation details.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2012-15&engl=0}
}
@inproceedings {INPROC-2012-14,
   author = {Christoph Gr{\"o}ger and Florian Niedermann and Holger Schwarz and Bernhard Mitschang},
   title = {{Supporting Manufacturing Design by Analytics. Continuous Collaborative Process Improvement enabled by the Advanced Manufacturing Analytics Platform}},
   booktitle = {Proceedings of the 2012 16th IEEE International Conference on Computer Supported Cooperative Work in Design (CSCWD), May 23-25, 2012, Wuhan, China},
   editor = {Liang Gao and Weiming Shen and Jean-Paul Barth{\`e}s and Junzhou Luo and Jianming Yong and Wenfeng Li and Weidong Li},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {793--799},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2012},
   isbn = {978-1-4673-1210-3},
   keywords = {Analytics; Data Mining; Process Management; Manufacturing; Process Optimization},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications,     J.1 Administration Data Processing},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The manufacturing industry is faced with global competition making efficient, effective and continuously improved manufacturing processes a critical success factor. Yet, media discontinuities, the use of isolated analysis methods on local data sets as well as missing means for sharing analysis results cause a collaborative gap in Manufacturing Process Management that prohibits continuous process improvement. To address this challenge, this paper proposes the Advanced Manufacturing Analytics (AdMA) Platform that bridges the gap by integrating operational and process manufacturing data, defining a repository for analysis results and providing indication-based and pattern-based optimization techniques. Both the conceptual architecture underlying the platform as well as its current implementation are presented in this paper.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2012-14&engl=0}
}
@inproceedings {INPROC-2011-87,
   author = {Jorge Minguez and Florian Niedermann and Bernhard Mitschang},
   title = {{A provenance-aware service repository for EAI process modeling tools}},
   booktitle = {IEEE International Conference on Information Reuse and Integration 2011 (IRI '11)},
   address = {Las Vegas},
   publisher = {IEEE Press},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {42--47},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {2011},
   doi = {10.1109/IRI.2011.6009518},
   keywords = {EAI process modeling tool; business process; business service; data interoperability; enterprise application integration; functional interoperability; manufacturing domain; process lifecycle management; provenance aware service repository; provenance data model; provenance subscription capabilities; service engineering methods; service knowledge base; service reusability; business data processing; knowledge based systems; manufacturing industries; open systems},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     D.2.13 Software Engineering Reusable Software},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {One of the major challenges for Enterprise Application Integration (EAI) process modeling tools is the continuous adaptation of the business processes and services. Business and IT specialists are both confronted with a number of problems involved in the adaptation of such processes, such as the lack of support for process lifecycle management, data and functional interoperability problems or the appropriate service knowledge base. Currently, most service engineering methods adopt a lifecycle strategy for the design, implementation, deployment and evaluation of services. However, enterprises exploiting service reusability lack the knowledge on process dependencies across the entire service lifecycle. This knowledge is required by process modeling tools in order to keep EAI processes loosely-coupled. Using a provenance data model we describe the different types of service dependencies in EAI processes with regard to the service changes across its lifecycle. We present a provenance-aware service repository with provenance subscription capabilities and its adoption for different use cases in the manufacturing domain.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-87&engl=0}
}
@inproceedings {INPROC-2011-86,
   author = {Nazario Cipriani and Oliver Schiller and Bernhard Mitschang},
   title = {{M-TOP: Multi-target Operator Placement of Query Graphs for Data Streams}},
   booktitle = {Proceedings of the 15th International Database Engineering and Applications Symposium (IDEAS 2011)},
   publisher = {ACM},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {52--60},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2011},
   language = {Englisch},
   cr-category = {G.1.6 Numerical Analysis Optimization,     C.2.3 Network Operations},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Nowadays, many applications process stream-based data, such as financial market analysis, network intrusion detection, or visualization applications. To process stream-based data in an application-independent manner, distributed stream processing systems emerged. They typically translate a query to an operator graph, place the operators on stream processing nodes, and execute them to process the streamed data. The operator placement is crucial in such systems, as it deeply influences query execution. Often, different stream-based applications require dedicated placement of query graphs according to their specific objectives, e.g. bandwidth not less than 500 MBit/s and costs not more than 1 cost unit. This constrains operator placement. Existing approaches do not take into account application-specific objectives, thus not reflecting application-specific placement decisions. As objectives might conflict with each other, operator placement is subject to delicate trade-offs, such as whether bandwidth maximization is more important than cost reduction. Thus, the challenge is to find a solution which considers the application-specific objectives and their trade-offs. We present M-TOP, a QoS-aware multi-target operator placement framework for data stream systems. In particular, we propose an operator placement strategy considering application-specific targets consisting of objectives, their respective trade-off specifications, bottleneck conditions, and ranking schemes to compute a suitable placement. We integrated M-TOP into NexusDS, our distributed data stream processing middleware, and provide an experimental evaluation to show the effectiveness of M-TOP.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-86&engl=0}
}
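The placement strategy summarized above combines application-specific objectives, trade-off specifications, bottleneck conditions and a ranking scheme. The following Python sketch illustrates only that general idea; the objective names, weights, thresholds and scoring rule are illustrative assumptions and do not reproduce M-TOP's actual algorithm.

# Toy ranking of candidate operator placements. All names, weights and
# thresholds below are illustrative assumptions, not M-TOP's model.
candidates = [
    {"node": "n1", "bandwidth_mbit": 800, "cost_units": 2.0},
    {"node": "n2", "bandwidth_mbit": 550, "cost_units": 0.8},
    {"node": "n3", "bandwidth_mbit": 300, "cost_units": 0.5},
]

# Bottleneck conditions: hard constraints a placement must satisfy.
def feasible(c):
    return c["bandwidth_mbit"] >= 500 and c["cost_units"] <= 1.0

# Trade-off specification: bandwidth maximization outweighs cost reduction.
# In practice the objective values would be normalized to comparable scales.
WEIGHTS = {"bandwidth_mbit": 0.7, "cost_units": -0.3}

def score(c):
    return sum(weight * c[key] for key, weight in WEIGHTS.items())

ranked = sorted(filter(feasible, candidates), key=score, reverse=True)
print(ranked[0]["node"] if ranked else "no feasible placement")  # prints n2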
@inproceedings {INPROC-2011-85,
   author = {Nazario Cipriani and Matthias Grossmann and Harald Sanftmann and Bernhard Mitschang},
   title = {{Design Considerations of a Flexible Data Stream Processing Middleware}},
   booktitle = {Proceedings of the 15th East-European Conference on Advances in Databases and Information Systems (ADBIS 2011)},
   publisher = {CEUR-WS.org},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {222--231},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2011},
   language = {Englisch},
   cr-category = {K.6.1 Project and People Management},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Techniques for efficient and distributed processing of huge, unbound data streams have made some impact in the database community. Distributed data stream processing systems have emerged providing a distributed environment to process these potentially unbound streams of data by a set of processing nodes. A wide range of realtime applications process stream-based data. Sensors and data sources, such as position data of moving objects, continuously produce data that is consumed by, e.g., location-aware applications. Depending on the domain of interest, the processing of such data often depends on domain-specific functionality. For instance, an application which visualizes stream-based data has stringent timing constraints, or may even need a specific hardware environment to smoothly process the data. Furthermore, users may add additional constraints. E.g., for security reasons they may want to restrict the set of nodes that participates in processing. In this paper we review context-aware applications which, despite their different application fields, share common data processing principles. We analyse these applications and extract common requirements which data stream processing systems must meet to support these applications. Finally, we show how such applications are implemented using NexusDS, our extensible stream processing middleware.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-85&engl=0}
}
@inproceedings {INPROC-2011-57,
   author = {Andreas Brodt and Oliver Schiller and Bernhard Mitschang},
   title = {{Efficient resource attribute retrieval in RDF triple stores}},
   booktitle = {Proceeding of the 20th ACM conference on Information and knowledge management (CIKM)},
   publisher = {Association for Computing Machinery (ACM)},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   type = {Konferenz-Beitrag},
   month = {Oktober},
   year = {2011},
   keywords = {RDF; SPARQL},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems,     H.2.2 Database Management Physical Design},
   contact = {andreas.brodt@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The W3C Resource Description Framework (RDF) is gaining popularity for its ability to manage semi-structured data without a predefined database schema. So far, most RDF query processors have concentrated on finding complex graph patterns in RDF, which typically involves a high number of joins. This works very well to query resources by the relations between them. Yet, obtaining a record-like view on the attributes of resources, as natively supported by RDBMS, imposes unnecessary performance burdens, as the individual attributes must be joined to assemble the final result records. We present an approach to retrieve the attributes of resources efficiently. We first determine the resources in question and then retrieve all their attributes efficiently at once, exploiting contiguous storage in RDF indexes. In addition, we present an index structure which is specifically designed for RDF attribute retrieval. In a performance evaluation we show that our approach is clearly superior for larger numbers of retrieved attributes.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-57&engl=0}
}
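The record-like view of a resource's attributes that this abstract contrasts with join-heavy graph-pattern evaluation can be approximated from the client side as shown below. The sketch uses the rdflib Python library and a hypothetical example vocabulary purely for illustration; the paper's contribution is an index structure inside the triple store, not this client-side assembly.

# Sketch: assemble one resource's attributes into a single record.
# Uses rdflib and a hypothetical vocabulary for illustration only.
from rdflib import Graph, Literal, Namespace, URIRef

EX = Namespace("http://example.org/")              # hypothetical vocabulary
res = URIRef("http://example.org/resource/42")     # hypothetical resource

g = Graph()
g.add((res, EX.name, Literal("Sensor 42")))
g.add((res, EX.kind, Literal("temperature")))
g.add((res, EX.unit, Literal("celsius")))

# Retrieve all (predicate, object) pairs of the resource at once,
# analogous to reading one record instead of joining per attribute.
record = {str(p): str(o) for p, o in g.predicate_objects(res)}
print(record)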
@inproceedings {INPROC-2011-40,
   author = {Jorge Minguez and Stefan Silcher and Bernhard Mitschang and Engelbert Westk{\"a}mper},
   title = {{Towards Intelligent Manufacturing: Equipping SOA-based Architectures with advanced SLM Services}},
   booktitle = {Proceedings of the 44th CIRP International Conference on Manufacturing Systems},
   publisher = {CIRP},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--8},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2011},
   keywords = {Service Oriented Architecture; Manufacturing; Service Lifecycle Management; SOA; SLM; Adaptability; Wandlungsf{\"a}higkeit},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     D.2.13 Software Engineering Reusable Software},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The vision of knowledge-based and intelligent manufacturing systems is driving the development of system architectures, which can seamlessly manage information flows across multiple heterogeneous manufacturing systems and provide the necessary services to support the execution of production processes. Constantly changing business conditions and turbulent scenarios force manufacturing companies to continuously adapt their business processes and manufacturing systems. In such a context, a flexible infrastructure that supports the full integration of processes and adapts its services is needed. This paper presents an innovative semantic service framework that enables the adoption of service lifecycle management (SLM) in an SOA-based integration framework.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-40&engl=0}
}
@inproceedings {INPROC-2011-39,
   author = {Stefan Silcher and Jorge Minguez and Bernhard Mitschang},
   title = {{Adopting the Manufacturing Service Bus in a Service-based Product Lifecycle Management Architecture}},
   booktitle = {Proceedings of the 44th International CIRP Conference on Manufacturing Systems: ICMS '11; Madison, Wisconsin, USA, May 31 - June 3, 2011},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--6},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2011},
   keywords = {Information; System Architecture; Product Lifecycle Management; Service Oriented Architecture; Enterprise Service Bus},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     D.2.13 Software Engineering Reusable Software},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Service-oriented computing is nowadays a rising technology to implement business processes in an efficient and flexible manner. This technology has a great impact on manufacturing environments. The realization of Product Lifecycle Management (PLM) with a Service Oriented Architecture (SOA) has many benefits. Some advantages are a seamless and flexible integration of all applications within PLM, including legacy systems, improved data provisioning and a reduced complexity by using a common service-based integration middleware, such as the Manufacturing Service Bus (MSB). In this paper the integration of the MSB into the service-oriented PLM approach will be described in detail.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-39&engl=0}
}
@inproceedings {INPROC-2011-37,
   author = {Sylvia Radesch{\"u}tz and Marko Vrhovnik and Holger Schwarz and Bernhard Mitschang},
   title = {{Exploiting the Symbiotic Aspects of Process and Operational Data for Optimizing Business Processes}},
   booktitle = {Proc. of the 12th IEEE International Conference on Information Reuse and Integration (IRI 2011)},
   address = {Las Vegas, USA},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--6},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {2011},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {A profound analysis of all relevant business data in a company is necessary for optimizing business processes effectively. Current analyses typically run either on business process execution data or on operational business data. Correlations among the separate data sets have to be found manually with great effort. However, to achieve a more informative analysis and to fully optimize a company’s business, an efficient consolidation of all major data sources is indispensable. Recent matching algorithms are insufficient for this task since they are restricted either to schema or to process matching. We present a new matching framework to combine process data models and operational data models (semi-)automatically for performing such a profound business analysis. We describe this approach and its basic matching rules as well as an experimental study that shows the achieved high recall and precision.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-37&engl=0}
}
@inproceedings {INPROC-2011-36,
   author = {Oliver Schiller and Benjamin Schiller and Andreas Brodt and Bernhard Mitschang},
   title = {{Native support of multi-tenancy in RDBMS for software as a service}},
   booktitle = {EDBT},
   publisher = {ACM},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {117--128},
   type = {Konferenz-Beitrag},
   month = {Januar},
   year = {2011},
   language = {Englisch},
   cr-category = {H.2.1 Database Management Logical Design,     H.2 Database Management},
   ee = {http://doi.acm.org/10.1145/1951365.1951382},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Software as a Service (SaaS) facilitates acquiring a huge number of small tenants by providing low service fees. To achieve low service fees, it is essential to reduce costs per tenant. For this, consolidating multiple tenants onto a single relational schema instance turned out beneficial because of low overheads per tenant and scalable manageability. This approach implements data isolation between tenants, per-tenant schema extension and further tenant-centric data management features in application logic. This is complex, disables some optimization opportunities in the RDBMS and represents a conceptual misstep with Separation of Concerns in mind. Therefore, we contribute first features of a RDBMS to support tenant-aware data management natively. We introduce tenants as first-class database objects and propose the concept of a tenant context to isolate a tenant from other tenants. We present a schema inheritance concept that allows sharing a core application schema among tenants while enabling schema extensions per tenant. Finally, we evaluate a preliminary implementation of our approach.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-36&engl=0}
}
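The two concepts named in the abstract above, a tenant context that scopes data access and a shared core schema that individual tenants may extend, can be mimicked in a few lines of Python. This is a plain in-memory stand-in under assumed names, not the proposed native RDBMS feature.

# Toy stand-in for tenant contexts and schema inheritance.
CORE_SCHEMA = {"customer": ["id", "name", "email"]}

class TenantContext:
    def __init__(self, tenant_id, schema_extensions=None):
        self.tenant_id = tenant_id
        # Schema inheritance: shared core columns plus per-tenant extensions.
        self.schema = {
            table: cols + (schema_extensions or {}).get(table, [])
            for table, cols in CORE_SCHEMA.items()
        }
        self.rows = {table: [] for table in self.schema}

    def insert(self, table, row):
        # Data isolation: rows live only inside this tenant's context.
        assert set(row) <= set(self.schema[table]), "unknown column"
        self.rows[table].append(row)

t1 = TenantContext("tenant-1", {"customer": ["loyalty_level"]})
t1.insert("customer", {"id": 1, "name": "Ada", "loyalty_level": "gold"})
print(t1.schema["customer"])  # ['id', 'name', 'email', 'loyalty_level']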
@inproceedings {INPROC-2011-26,
   author = {Florian Niedermann and Sylvia Radesch{\"u}tz and Bernhard Mitschang},
   title = {{Design-Time Process Optimization through Optimization Patterns and Process Model Matching}},
   booktitle = {Proceedings of the 12th IEEE Conference on Commerce and Enterprise Computing (CEC)},
   publisher = {IEEE Computer Society},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {48--55},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2011},
   keywords = {Business Process Analytics; Business Process Design; Business Process Management; Business Process Optimization; Process Model Matching},
   language = {Englisch},
   cr-category = {H.4.1 Office Automation},
   ee = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=5708392},
   contact = {florian.niedermann@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The goal of process design is the construction of a process model that is a priori optimal w.r.t. the goal(s) of the business owning the process. Process design is therefore a major factor in determining the process performance and ultimately the success of a business. Despite this importance, the designed process is often less than optimal. This is due to two major challenges: First, since the design is an a priori activity, no actual execution data is available to provide the foundations for design decisions. Second, since modeling decision support is typically basic at best, the quality of the design largely depends on the ability of business analysts to make the ``right'' design choices. To address these challenges, we present in this paper our deep Business Optimization Platform that enables (semi-) automated process optimization during process design based on actual execution data. Our platform achieves this task by matching new processes to existing processes stored in a repository based on similarity metrics and by using a set of formalized best-practice process optimization patterns.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-26&engl=0}
}
@inproceedings {INPROC-2011-24,
   author = {Florian Niedermann and Bernhard Maier and Sylvia Radesch{\"u}tz and Holger Schwarz and Bernhard Mitschang},
   title = {{Automated Process Decision Making based on Integrated Source Data}},
   booktitle = {Proceedings of the 14th International Conference on Business Information Systems (BIS 2011)},
   editor = {Witold Abramowicz},
   publisher = {Springer},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Lecture Notes in Business Information Processing},
   pages = {1--10},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2011},
   keywords = {Data Mining, Decision Automation, Data Integration, Business Process Management, Data-driven Processes},
   language = {Englisch},
   cr-category = {H.4.1 Office Automation,     H.2.8 Database Applications,     H.5.2 Information Interfaces and Presentation User Interfaces},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The success of most of today's businesses is tied to the efficiency and effectiveness of their core processes. Yet, two major challenges often prevent optimal processes: First, the analysis techniques applied during the optimization are inadequate and fail to include all relevant data sources. Second, the success depends on the abilities of the individual analysts to spot the right designs amongst a plethora of choices. Our deep Business Optimization Platform addresses these challenges through specialized data integration, analysis and optimization facilities. In this paper, we focus on how it uses formalized process optimization patterns for detecting and implementing process improvements.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-24&engl=0}
}
@inproceedings {INPROC-2011-23,
   author = {Florian Niedermann and Sylvia Radesch{\"u}tz and Bernhard Mitschang},
   title = {{Business Process Optimization using Formalized Optimization Patterns}},
   booktitle = {Proceedings of the 14th International Conference on Business Information Systems (BIS 2011)},
   editor = {Witold Abramowicz},
   publisher = {Springer},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--10},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2011},
   keywords = {Business Process Management; Business Process Optimization},
   language = {Englisch},
   cr-category = {H.4.1 Office Automation},
   contact = {florian.niedermann@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The success of most of today's businesses is tied to the efficiency and effectiveness of their core processes. Yet, two major challenges often prevent optimal processes: First, the analysis techniques applied during the optimization are inadequate and fail to include all relevant data sources. Second, the success depends on the abilities of the individual analysts to spot the right designs amongst a plethora of choices. Our deep Business Optimization Platform addresses these challenges through specialized data integration, analysis and optimization facilities. In this paper, we focus on how it uses formalized process optimization patterns for detecting and implementing process improvements.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-23&engl=0}
}
@inproceedings {INPROC-2011-20,
   author = {Carlos L{\"u}bbe and Andreas Brodt and Nazario Cipriani and Matthias Gro{\ss}mann and Bernhard Mitschang},
   title = {{DiSCO: A Distributed Semantic Cache Overlay for Location-based Services}},
   booktitle = {Proceedings of the 2011 Twelfth International Conference on Mobile Data Management},
   address = {Washington, DC, USA},
   publisher = {IEEE Computer Society},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {1--10},
   type = {Konferenz-Beitrag},
   month = {Januar},
   year = {2011},
   keywords = {peer-to-peer; semantic caching},
   language = {Englisch},
   cr-category = {C.2.4 Distributed Systems,     H.2.4 Database Management Systems},
   contact = {carlos.luebbe@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Location-based services (LBS) have gained tremendous popularity with millions of simultaneous users daily. LBS handle very large data volumes and face enormous query loads. Both the data and the queries possess high locality: spatial data is distributed very unevenly around the globe, query load is different throughout the day, and users often search for similar things in the same places. This causes high load peaks at the data tier of LBS, which may seriously degrade performance. To cope with these load peaks, we present DiSCO, a distributed semantic cache overlay for LBS. DiSCO exploits the spatial, temporal and semantic locality in the queries of LBS and distributes frequently accessed data over many nodes. Based on the Content-Addressable Network (CAN) peer-to-peer approach, DiSCO achieves high scalability by partitioning data using spatial proximity. Our evaluation shows that DiSCO significantly reduces queries to the underlying data tier.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-20&engl=0}
}
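The partitioning idea sketched in the abstract above, routing spatially close queries to the overlay node responsible for that region so that hot regions are answered from the cache layer, can be illustrated with a fixed grid standing in for the CAN key space. Cell size, node names and the hash-based assignment below are illustrative assumptions, not DiSCO's actual scheme.

# Toy sketch: route a spatial query to the node responsible for its grid
# cell; a fixed grid and hash assignment stand in for the CAN key space.
NODES = ["node-a", "node-b", "node-c"]
CELL_SIZE = 0.5  # degrees; illustrative value

def cell(lat, lon):
    return (int(lat // CELL_SIZE), int(lon // CELL_SIZE))

def responsible_node(lat, lon):
    return NODES[hash(cell(lat, lon)) % len(NODES)]

cache = {}  # (node, cell) -> cached result set

def query(lat, lon):
    key = (responsible_node(lat, lon), cell(lat, lon))
    if key in cache:
        return cache[key]                    # answered by the overlay
    result = f"results near ({lat}, {lon})"  # placeholder for the data tier
    cache[key] = result
    return result

print(query(48.78, 9.18))  # first call hits the data tier
print(query(48.78, 9.18))  # second call is served from the cache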
@inproceedings {INPROC-2011-16,
   author = {Andreas Brodt and Oliver Schiller and Sailesh Sathish and Bernhard Mitschang},
   title = {{A mobile data management architecture for interoperability of resource and context data}},
   booktitle = {Proceedings of the 2011 Twelveth International Conference on Mobile Data Management},
   publisher = {IEEE Computer Society},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {1--6},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2011},
   language = {Englisch},
   cr-category = {H.3.4 Information Storage and Retrieval Systems and Software,     H.3.5 Online Information Services,     H.2.4 Database Management Systems},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2011-16/INPROC-2011-16.pdf},
   contact = {brodt@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Mobile devices have become general-purpose computers that are equipped with sensors, constantly access the internet, and almost always accompany the user. Consequently, devices manage many different kinds of data about the user's life and context. There is considerable overlap in this data, as different applications handle similar data domains. Applications often keep this data in separated data silos. Web applications, which manage large amounts of personal data, hardly share this data with other applications at all. This lack of interoperability creates redundancy and impacts usability of mobile devices. We present a data management architecture for mobile devices to support interoperability between applications, devices and web applications at the data management level. We propose a central on-device repository for applications to share resource and context data in an integrated, extensible data model which uses semantic web technologies and supports location data. A web browser interface shares data with web applications, as controlled by a general security model.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-16&engl=0}
}
@inproceedings {INPROC-2010-89,
   author = {Florian Niedermann and Sylvia Radesch{\"u}tz and Bernhard Mitschang},
   title = {{Deep Business Optimization: A Platform for Automated Process Optimization}},
   booktitle = {Business Process and Service Science - Proceedings of ISSS and BPSC: BPSC'10; Leipzig, Germany, September 27th - October 1st, 2010},
   editor = {Witold Abramowicz and Rainer Alt and Klaus-Peter F{\"a}hnrich and Bogdan Franczyk and Leszek A. Maciaszek},
   publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Lecture Notes in Informatics},
   volume = {P177},
   pages = {168--180},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2010},
   isbn = {978-3-88579-271-0},
   language = {Englisch},
   cr-category = {H.4.1 Office Automation,     H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The efficient and effective design, execution and adaption of its core processes is vital for the success of most businesses and a major source of competitive advantage. Despite this critical importance, process optimization today largely depends on manual analytics and the ability of business analysts to spot the ``right'' designs and areas of improvement. This is because current techniques typically fall short in three areas: They fail to integrate relevant data sources, they do not provide optimal analytical procedures and they leave it up to the analyst to identify the best process design. Hence, we propose in this paper a platform that enables (semi-)automated process optimization during the process design, execution and analysis stage, based on insights from specialized analytical procedures running on an integrated warehouse containing both process and operational data. We further detail the analysis stage, as it provides the foundation for all other optimization stages.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-89&engl=0}
}
@inproceedings {INPROC-2010-79,
   author = {Stefan Silcher and Jorge Minguez and Thorsten Scheibler and Bernhard Mitschang},
   title = {{A Service-Based Approach for Next-Generation Product Lifecycle Management}},
   booktitle = {Proceedings of the 11th IEEE International Conference on Information Reuse and Integration (IEEE IRI 2010) in Las Vegas, Nevada, USA.},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {219--224},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {2010},
   isbn = {978-1-4244-8098-2},
   keywords = {Product Lifecycle Management; PLM; Service Oriented Architecture; SOA; Enterprise Service Bus; ESB},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     D.2.13 Software Engineering Reusable Software},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Nowadays, one of the main challenges for companies is the effective management of IT systems. In times where requirements and companies change steadily, the IT infrastructure has to adapt to these changes as well: new systems have to be integrated or existing ones adapted. Even worse, these systems work together to support business processes of a company and, thus, the infrastructure becomes complex and difficult to manage. The same situation is true for Product Lifecycle Management (PLM) that accompanies a product development by means of interconnected IT systems running on complex IT infrastructures. This paper introduces a viable solution to the integration of all phases of PLM. An Enterprise Service Bus (ESB) is employed as the service-based integration and communication infrastructure. Three exemplary scenarios are introduced to describe the benefits of using an ESB as compared to alternative PLM infrastructures. Furthermore, we introduce a service hierarchy to enable value-added services to enhance PLM functionality.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-79&engl=0}
}
@inproceedings {INPROC-2010-45,
   author = {Jorge Minguez and Dominik Lucke and Mihaly Jakob and Carmen Constantinescu and Bernhard Mitschang},
   title = {{Introducing SOA into Production Environments - The Manufacturing Service Bus}},
   booktitle = {Proceedings of the 43rd. CIRP International Conference on Manufacturing Systems},
   address = {Vienna, Graz, Austria},
   publisher = {Neuer Wissenschaftlicher Verlag},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1117--1124},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2010},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     D.2.13 Software Engineering Reusable Software},
   contact = {jorge.minguez@gsame.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Volatile markets and constantly changing business conditions force manufacturing enterprises to continuously adapt their digital factory information systems. In many industries there is still no backbone for the integration of factory information systems. Current integration is based on point-to-point interfaces, which is partially due to the cost of replacing their established legacy systems. Furthermore, the lack of flexibility prevents business processes from improving their responsiveness and adapting manufacturing workflows to different turbulences. The principles of Service-Oriented Architecture (SOA) and its associated technologies (e. g., Enterprise Service Bus) and standards are key enablers and drivers of the required flexibility. The presented Manufacturing Service Bus is a SOA-based approach that extends the Enterprise Service Bus capabilities in three areas: event management, factory context semantics and change propagation workflows. The Manufacturing Service Bus provides an event-driven platform for flexible integration of digital factory applications.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-45&engl=0}
}
@inproceedings {INPROC-2010-37,
   author = {Benjamin Leonhardi and Bernhard Mitschang and Rub{\'e}n Pulido and Christoph Sieb and Michael Wurst},
   title = {{Augmenting OLAP exploration with dynamic advanced analytics.}},
   booktitle = {Proceedings of the 13th International Conference on Extending Database Technology (EDBT 2010), Lausanne, Switzerland, March 22-26, 2010},
   address = {New York, NY, USA},
   publisher = {ACM},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   pages = {687--692},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2010},
   isbn = {978-1-60558-945-9},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Online Analytical Processing (OLAP) is a popular technique for explorative data analysis. Usually, a fixed set of dimensions (such as time, place, etc.) is used to explore and analyze various subsets of a given, multi-dimensional data set. These subsets are selected by constraining one or several of the dimensions, for instance, showing sales only in a given year and geographical location. Still, such aggregates are often not enough. Important information can only be discovered by combining several dimensions in a multidimensional analysis. Most existing approaches allow adding new dimensions either statically or dynamically. These approaches support, however, only the creation of global dimensions that are not interactive for the user running the report. Furthermore, they are mostly restricted to data clustering and the resulting dimensions cannot be interactively refined. In this paper we propose a technique and an architectural solution that is based on an interaction concept for creating OLAP dimensions on subsets of the data dynamically, triggered interactively by the user, based on arbitrary multi-dimensional grouping mechanisms. This approach allows combining the advantages of both OLAP exploration and interactive multidimensional analysis. We demonstrate the industry-strength of our solution architecture using a setup of IBM® InfoSphere™ Warehouse data mining and Cognos® BI as reporting engine. Use cases and industrial experiences are presented showing how insight derived from data mining can be transparently presented in the reporting front end, and how data mining algorithms can be invoked from the front end, achieving closed-loop integration.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-37&engl=0}
}
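The interaction concept described above, deriving a new grouping dimension on a user-selected subset and then using it like any other OLAP dimension, is sketched below with scikit-learn clustering on a toy table. The column names, subset predicate and cluster count are illustrative assumptions and are unrelated to the IBM product setup mentioned in the abstract.

# Sketch: derive a dynamic grouping "dimension" on a data subset by
# clustering, then aggregate along it. Purely illustrative values.
import pandas as pd
from sklearn.cluster import KMeans

sales = pd.DataFrame({
    "year":    [2009] * 4 + [2010] * 4,
    "revenue": [10.0, 12.0, 55.0, 58.0, 11.0, 13.0, 60.0, 62.0],
    "units":   [100, 110, 500, 520, 105, 115, 530, 540],
})

subset = sales[sales["year"] == 2010].copy()   # interactively chosen slice
labels = KMeans(n_clusters=2, n_init=10).fit_predict(subset[["revenue", "units"]])
subset["segment"] = labels                      # the dynamically created dimension

print(subset.groupby("segment")["revenue"].sum())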
@inproceedings {INPROC-2010-122,
   author = {Nicola H{\"o}nle and Matthias Gro{\ss}mann and Steffen Reimann and Bernhard Mitschang},
   title = {{Usability analysis of compression algorithms for position data streams}},
   booktitle = {18th ACM SIGSPATIAL International Symposium on Advances in Geographic Information Systems, ACM-GIS 2010, November 3-5, 2010, San Jose, CA, USA, Proceedings},
   editor = {Divyakant Agrawal and Pusheng Zhang and Amr El Abbadi and Mohamed F. Mokbel},
   publisher = {ACM},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {240--249},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2010},
   keywords = {trajectory compression; sensor data stream},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications,     C.2.4 Distributed Systems,     F.2.2 Nonnumerical Algorithms and Problems,     G.1.2 Numerical Analysis Approximation},
   contact = {nicola.hoenle@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {With the increasing use of sensor technology, the compression of sensor data streams is getting more and more important to reduce both the costs of further processing as well as the data volume for persistent storage. A popular method for sensor data compression is to smooth the original measurement curve by an approximated curve, which is bounded by a given maximum error value. Measurement values from positioning systems like GPS are an interesting special case, because they consist of two spatial and one temporal dimension. Therefore various standard techniques for approximation calculations like regression or line simplification algorithms cannot be directly applied. In this paper, we portray our stream data management system NexusDS and an operator for compressing sensor data. For the operator, we implemented various compression algorithms for position data streams. We present the required adaptations and the different characteristics of the compression algorithms as well as the results of our evaluation experiments, and compare them with a map matching approach, specifically developed for position data.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-122&engl=0}
}
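As a hedged illustration of the error-bounded compression idea discussed above, the sketch below drops every position fix that lies within a maximum deviation of the last retained fix. This is deliberately much simpler than the line-simplification and regression-based algorithms the paper evaluates; it only conveys the notion of a bounded approximation error.

# Deliberately simple bounded-deviation filter for a position stream:
# a fix is dropped if it lies within max_error of the last kept fix, so
# every dropped fix is within max_error of some retained fix.
import math

def compress(points, max_error):
    """points: iterable of (t, x, y) tuples; max_error in the unit of x, y."""
    kept = []
    for t, x, y in points:
        if not kept:
            kept.append((t, x, y))
            continue
        _, kx, ky = kept[-1]
        if math.hypot(x - kx, y - ky) > max_error:
            kept.append((t, x, y))
    return kept

stream = [(i, i * 0.1, 0.01 * (i % 3)) for i in range(100)]
print(len(compress(stream, max_error=1.0)))  # far fewer than 100 points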
@inproceedings {INPROC-2010-121,
   author = {Jorge Minguez and Frank Ruthardt and Philipp Riffelmacher and Thorsten Scheibler and Bernhard Mitschang},
   title = {{Service-based Integration in Event-driven Manufacturing Environments}},
   booktitle = {WISE 2010 Workshops},
   publisher = {Springer},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Lecture Notes in Computer Science},
   volume = {6724},
   pages = {0--14},
   type = {Konferenz-Beitrag},
   month = {Dezember},
   year = {2010},
   keywords = {Manufacturing; Service-oriented Computing; Service-oriented Architecture},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     D.2.13 Software Engineering Reusable Software},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Constantly changing business conditions require a high level of flexibility in business processes as well as an adaptive and fully interoperable IT infrastructure in today’s manufacturing environments. The lack of flexibility prevents manufacturing companies from improving their responsiveness and adapting their workflows to turbulent scenarios. In order to achieve highly flexible and adaptive workflows, information systems in digital factories and shop floors need to be integrated. The most challenging problem in such manufacturing environments is the high heterogeneity of the IT landscape, where the integration of legacy systems and information silos has led to chaotic architectures over the last two decades. In order to overcome this issue, we present a flexible integration platform that allows a loose coupling of distributed services in event-driven manufacturing environments. Our approach enables a flexible communication between digital factory and shop floor components by introducing a service bus architecture. Our solution integrates an application-independent canonical message format for manufacturing events, content-based routing and transformation services as well as event processing workflows.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-121&engl=0}
}
@inproceedings {INPROC-2010-120,
   author = {Andreas Brodt and Alexander Wobser and Bernhard Mitschang},
   title = {{Resource Discovery Protocols for Bluetooth-Based Ad-hoc Smart Spaces: Architectural Considerations and Protocol Evaluation}},
   booktitle = {Proceedings of the 2010 Eleventh International Conference on Mobile Data Management},
   address = {Washington, DC, USA},
   publisher = {IEEE Computer Society},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {145--150},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2010},
   isbn = {978-0-7695-4048-1},
   keywords = {Bluetooth; ad-hoc network; scatternet; protocol; evaluation; ad-hoc smart spaces; resource discovery; mobile device},
   language = {Englisch},
   cr-category = {C.2.1 Network Architecture and Design,     C.2.2 Network Protocols},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2010-120/INPROC-2010-120.pdf},
   contact = {brodt@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Ad-hoc smart spaces aim at resource-rich mobile devices sharing resource and context data with others near them spontaneously. Thus, a device may, e.g., obtain a more complete context model by utilizing sensor data of its neighbors via wireless communication, such as Bluetooth. The highly dynamic device neighborhood challenges resource discovery, as the devices have to organize themselves autonomously. This paper evaluates different resource discovery protocols in Bluetooth-based ad-hoc smart spaces. We simulate the protocols in different scenarios taking into account the scatternet structure of the network. We suggest request flooding for small settings and random replication for medium to large spaces.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-120&engl=0}
}
@inproceedings {INPROC-2010-119,
   author = {Andreas Brodt and Daniela Nicklas and Bernhard Mitschang},
   title = {{Deep integration of spatial query processing into native RDF triple stores}},
   booktitle = {Proceedings of the 18th SIGSPATIAL International Conference on Advances in Geographic Information Systems},
   address = {New York, NY, USA},
   publisher = {ACM Press},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {33--42},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2010},
   isbn = {978-1-4503-0428-3},
   keywords = {GIS, RDF, SPARQL, spatial database, triple store},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2010-119/INPROC-2010-119.pdf},
   contact = {brodt@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Semantic Web technologies, most notably RDF, are well-suited to cope with typical challenges in spatial data management including analyzing complex relations between entities, integrating heterogeneous data sources and exploiting poorly structured data, e.g., from web communities. Also, RDF can easily represent spatial relationships, as long as the location information is symbolic, i.e., represented by places that have a name. What is widely missing is support for geographic and geometric information, such as coordinates or spatial polygons, which is needed in many applications that deal with sensor data or map data. This calls for efficient data management systems which are capable of querying large amounts of RDF data and support spatial query predicates. We present a native RDF triple store implementation with deeply integrated spatial query functionality. We model spatial features in RDF as literals of a complex geometry type and express spatial predicates as SPARQL filter functions on this type. This makes it possible to use W3C's standardized SPARQL query language as-is, i.e., without any modifications or extensions for spatial queries. We evaluate the characteristics of our system on very large data volumes.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-119&engl=0}
}
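The query style described in the abstract above, plain SPARQL with the spatial predicate expressed as a filter function over a geometry literal, might look roughly like the string below. The prefix, the function name ex:within and the WKT literal are hypothetical placeholders; the abstract does not name the actual filter functions.

# Sketch of an unmodified SPARQL query whose spatial predicate is a
# filter function over a geometry literal; names are hypothetical.
query = """
PREFIX ex: <http://example.org/geo#>
SELECT ?poi ?name WHERE {
  ?poi ex:name ?name ;
       ex:geometry ?geom .
  FILTER ( ex:within(?geom, "POLYGON((9.1 48.7, 9.3 48.7, 9.3 48.8, 9.1 48.8, 9.1 48.7))") )
}
"""
print(query)  # standard SPARQL syntax; only the filter function is spatial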
@inproceedings {INPROC-2009-94,
   author = {Nazario Cipriani and Mike Eissele and Andreas Brodt and Matthias Gro{\ss}mann and Bernhard Mitschang},
   title = {{NexusDS: A Flexible and Extensible Middleware for Distributed Stream Processing}},
   booktitle = {IDEAS '09: Proceedings of the 2009 International Symposium on Database Engineering \& Applications},
   editor = {ACM},
   publisher = {ACM},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {152--161},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2009},
   keywords = {Data Stream Processing; Stream Databases; Middleware Platforms for DataManagement; P2P and Networked DataManagement; Database Services and Applications},
   language = {Englisch},
   cr-category = {C.2 Computer-Communication Networks,     C.5 Computer System Implementation,     H.2 Database Management},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Techniques for efficient and distributed processing of huge, unbound data streams have made some impact in the database community. Sensors and data sources, such as position data of moving objects, continuously produce data that is consumed, e.g., by location-aware applications. Depending on the domain of interest, e.g. visualization, the processing of such data often depends on domain-specific functionality. This functionality is specified in terms of dedicated operators that may require specialized hardware, e.g. GPUs. This creates a strong dependency which a data stream processing system must consider when deploying such operators. Many data stream processing systems have been presented so far. However, these systems assume homogeneous computing nodes, do not consider operator deployment constraints, and are not designed to address domain-specific needs. In this paper, we identify necessary features that a flexible and extensible middleware for distributed stream processing of context data must satisfy. We present NexusDS, our approach to achieve these requirements. In NexusDS, data processing is specified by orchestrating data flow graphs, which are modeled as processing pipelines of predefined and general operators as well as custom-built and domain-specific ones. We focus on easy extensibility and support for domain-specific operators and services that may even utilize specific hardware available on dedicated computing nodes.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-94&engl=0}
}
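The orchestration of data flow graphs as processing pipelines of general and domain-specific operators, as described above, can be illustrated by the small Python generator pipeline below. The operator names and the composition style are illustrative assumptions, not NexusDS interfaces.

# Toy data-flow pipeline: a generic filter operator chained with a
# "domain-specific" sink; names and composition API are illustrative.
def source():
    for i in range(5):
        yield {"id": i, "x": float(i), "y": float(i) * 2}

def filter_op(stream, predicate):
    return (item for item in stream if predicate(item))

def visualize_op(stream):          # stands in for a domain-specific operator
    for item in stream:            # that might require dedicated hardware
        print(f"render point ({item['x']}, {item['y']})")

pipeline = filter_op(source(), lambda item: item["id"] % 2 == 0)
visualize_op(pipeline)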
@inproceedings {INPROC-2009-68,
   author = {Sylvia Radesch{\"u}tz and Bernhard Mitschang},
   title = {{Extended Analysis Techniques For a Comprehensive Business Process Optimization}},
   booktitle = {Proc. of the International Conference on Knowledge Management and Information Sharing (KMIS 2009), Portugal, 6.-8. Oktober, 2009.},
   publisher = {Springer},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--6},
   type = {Konferenz-Beitrag},
   month = {Oktober},
   year = {2009},
   language = {Englisch},
   cr-category = {H.2.7 Database Administration,     H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Efficient adaption of a company’s business and its business processes to a changing environment is a crucial ability to survive in today’s dynamic world. For optimizing business processes, a profound analysis of all relevant business data in the company is necessary. We define an extended data warehouse approach that integrates process-related data and operational business data. This extended data warehouse is used as the underlying data source for extended OLAP and data mining analysis techniques for a comprehensive business process optimization.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-68&engl=0}
}
@inproceedings {INPROC-2009-130,
   author = {Jorge Minguez and Mihaly Jakob and Uwe Heinkel and Bernhard Mitschang},
   title = {{A SOA-based approach for the integration of a data propagation system}},
   booktitle = {IRI'09: Proceedings of the 10th IEEE international conference on Information Reuse \& Integration},
   publisher = {IEEE Press},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {47--52},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {2009},
   isbn = {978-1-4244-4114-3},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     D.2.13 Software Engineering Reusable Software},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Major challenges that companies face nowadays are extremely volatile markets, a globally distributed supplier network and constantly changing business environments. These circumstances demand a high level of agility and extraordinary flexibility in the business modeling and the organizational structures of a company as well as adaptive and interoperable IT systems. In order to meet these requirements an integration of systems needs to be achieved. A possible solution for this problem is Champagne, which is a data propagation system that ensures the interoperability of enterprise applications at the data level. However, Champagne provides a tightly-coupled integration of applications and its architecture lacks the needed flexibility to link business processes. These deficiencies can be overcome with the adoption of a service-oriented architecture (SOA), based on loosely-coupled services, which enable a higher level of flexibility and interoperability. Therefore, we explore in this paper a number of options to reuse and integrate Champagne into a service-oriented architecture in order to benefit from SOA principles.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-130&engl=0}
}
@inproceedings {INPROC-2008-86,
   author = {Sylvia Radesch{\"u}tz and Bernhard Mitschang},
   title = {{An Annotation Approach for the Matching of Process Variables and Operational Business Data Models}},
   booktitle = {Proc. of the 21st International Conference on Computer Applications in Industry and Engineering (CAINE 2008)},
   address = {Honolulu, USA},
   publisher = {The International Society for Computers and Their Applications},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--6},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2008},
   language = {Englisch},
   cr-category = {D.2.2 Software Engineering Design Tools and Techniques,     D.2.12 Software Engineering Interoperability,     H.4.1 Office Automation,     H.5.2 Information Interfaces and Presentation User Interfaces},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Efficient adaptation to new situations of a company’s business and its business processes plays an important role in achieving competitive advantages over other companies. For an optimization of business processes, a profound analysis of all relevant business data in the company is necessary. Analyses typically specialize either in process analysis or in data warehousing of operational business data. However, to achieve a significantly more detailed analysis in order to fully optimize a company’s business, a consolidation of all major business data sources is indispensable. This paper introduces an approach that allows consolidating process variables and operational data models in a semi-automatic manner. To this end, a semantic annotation is applied. In this paper, we focus on an ontology-based annotation of the operational data in data warehouses, show how it is realized in a tool, and discuss its general usability in other areas.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-86&engl=0}
}
@inproceedings {INPROC-2008-82,
   author = {Frank Wagner and Kathleen Krebs and Cataldo Mega and Bernhard Mitschang and Norbert Ritter},
   title = {{Email Archiving and Discovery as a Service}},
   booktitle = {Intelligent Distributed Computing, Systems and Applications; Proceedings of the 2nd International Symposium on Intelligent Distributed Computing: IDC 2008; Catania, Italy},
   editor = {Costin Badica and Giuseppe Mangioni and Vincenza Carchiolo and Dumitru Dan Burdescu},
   publisher = {Springer-Verlag},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Studies in Computational Intelligence},
   volume = {162},
   pages = {197--206},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2008},
   isbn = {978-3-540-85256-8},
   language = {Englisch},
   cr-category = {C.2.4 Distributed Systems,     H.3.2 Information Storage,     H.3.4 Information Storage and Retrieval Systems and Software},
   contact = {Frank Wagner frank.wagner@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Corporate governance and legislative regulations are forcing companies to extend their IT infrastructure by Email Archive and Discovery (EAD) systems for compliance reasons. Practice shows that every installation is different from another, not only in terms of the execution infrastructure, but also in terms of, e.g., document and archiving procedures that map a company’s own business rules. As a consequence, EAD systems have to be highly customizable to their intended usages. For this purpose, we propose a service-oriented approach at various levels of detail that, on one hand, allows for describing EAD properties at the abstract (service) level and, on the other hand, supports the appropriate mapping of these services to the existing execution infrastructure. In this paper, we focus on the development and (architectural) design of an EAD system, which is well suited to fulfill these requirements. In the long run, we consider this solution an important step on the way to an effective distributed and scalable approach, which, as we think, can be achieved by appropriate mechanisms of automatic workload management and dynamic provisioning of EAD services based on, e.g., grid technology.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-82&engl=0}
}
@inproceedings {INPROC-2008-81,
   author = {Frank Wagner and Kathleen Krebs and Cataldo Mega and Bernhard Mitschang and Norbert Ritter},
   title = {{Towards the Design of a Scalable Email Archiving and Discovery Solution}},
   booktitle = {Proceedings of the 12th East-European Conference on Advances in Databases and Information Systems},
   editor = {Paolo Atzeni and Albertas Caplinskas and Hannu Jaakkola},
   publisher = {Springer-Verlag},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Lecture Notes in Computer Science},
   pages = {305--320},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2008},
   language = {Englisch},
   cr-category = {C.2.4 Distributed Systems,     H.3.2 Information Storage,     H.3.4 Information Storage and Retrieval Systems and Software},
   contact = {Frank Wagner frank.wagner@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {In this paper we propose a novel approach to specialize a general purpose Enterprise Content Management (ECM) System into an Email Archiving and Discovery (EAD) System. The magnitude and range of compliance risks associated with the management of EAD is driving investment in the development of more effective and efficient approaches to support regulatory compliance, legal discovery and content life-cycle needs. Companies must recognize and address requirements like legal compliance, electronic discovery, and document retention management. What is needed today are EAD systems capable of processing very high message ingest rates, supporting distributed full-text indexing, and allowing forensic search in support of litigation cases. All this must be provided at the lowest cost with respect to archive management and administration. In our approach we introduce a virtualized ECM repository interface where the key content repository components are wrapped into a set of tightly coupled Grid service entities, so as to achieve scale-out on a cluster of commodity blade hardware that is automatically configured and dynamically provisioned. By doing so, we believe we can leverage the strength of Relational Database Management Systems and Full Text Indexes in a managed clustered environment with minimal operational overhead.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-81&engl=0}
}
@inproceedings {INPROC-2008-51,
   author = {Nicola H{\"o}nle and Matthias Grossmann and Daniela Nicklas and Bernhard Mitschang},
   title = {{Preprocessing Position Data of Mobile Objects}},
   booktitle = {Proceedings of the 9th International Conference on Mobile Data Management (MDM'08); Beijing, China, April 27-30, 2008.},
   publisher = {IEEE computer society},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {1--8},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2008},
   isbn = {978-0-7695-3154-0},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications,     G.1.2 Numerical Analysis Approximation},
   contact = {nicola.hoenle@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {We present the design and implementation of a component for the preprocessing of position data taken from moving objects. The movement of mobile objects is represented by piecewise functions over time that approximate the real object movement and significantly reduce the initial data volume such that efficient storage and analysis of object trajectories can be achieved. The maximal acceptable deviation---an input parameter of our algorithms---of the approximations also includes the uncertainty of the position sensor measurements. We analyze and compare five different lossy preprocessing methods. Our results clearly indicate that even with simple approaches, a more than sufficient overall performance can be achieved.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-51&engl=0}
}
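The abstract above describes representing object movement by piecewise functions subject to a maximal acceptable deviation. As a rough illustration only (the function names, the greedy strategy and the (t, x, y) sample format are assumptions for this sketch, not taken from the paper), a piecewise-linear variant of such a lossy preprocessing step could look like this in Python:

def _deviation(p, a, b):
    """Distance of sample p from the segment a->b, interpolated at p's timestamp."""
    (tp, xp, yp), (ta, xa, ya), (tb, xb, yb) = p, a, b
    if tb == ta:
        return ((xp - xa) ** 2 + (yp - ya) ** 2) ** 0.5
    r = (tp - ta) / (tb - ta)
    xi, yi = xa + r * (xb - xa), ya + r * (yb - ya)
    return ((xp - xi) ** 2 + (yp - yi) ** 2) ** 0.5

def simplify(samples, epsilon):
    """Greedy piecewise-linear reduction of (t, x, y) samples: a sample is dropped
    as long as interpolating over it stays within the maximal deviation epsilon."""
    if len(samples) <= 2:
        return list(samples)
    kept, window = [samples[0]], [samples[1]]
    for point in samples[2:]:
        if all(_deviation(q, kept[-1], point) <= epsilon for q in window):
            window.append(point)        # still representable by one segment
        else:
            kept.append(window[-1])     # close the segment at the last fitting sample
            window = [point]
    kept.append(window[-1])
    return kept

track = [(t, float(t), 0.05 * (t % 3)) for t in range(20)]
print(len(simplify(track, epsilon=0.2)), "of", len(track), "samples kept")

Choosing epsilon large enough to also cover the position sensor's uncertainty, as the abstract suggests, would simply enlarge the threshold used above.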
@inproceedings {INPROC-2008-50,
   author = {Andreas Brodt and Daniela Nicklas and Sailesh Sathish and Bernhard Mitschang},
   title = {{Context-Aware Mashups for Mobile Devices}},
   booktitle = {Web Information Systems Engineering – WISE 2008 9th International Conference on Web Information Systems Engineering, Auckland, New Zealand, September 1-3, 2008, Proceedings},
   publisher = {Springer-Verlag},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   series = {Lecture Notes in Computer Science},
   type = {Konferenz-Beitrag},
   month = {Januar},
   year = {2008},
   keywords = {mashup, location-based services, Delivery Context Client Interfaces, DCCI, AJAX, context provisioning},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     D.2.12 Software Engineering Interoperability,     H.5.1 Multimedia Information Systems,     H.5.4 Hypertext/Hypermedia,     H.2.5 Heterogeneous Databases},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2008-50/INPROC-2008-50.pdf},
   contact = {andreas.brodt@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {With the Web 2.0 trend and its participation of end users, more and more data and information services, such as web sites, wikis, or web services, are accessible online. So-called mashups---web applications that integrate data from more than one source into an integrated service---can be easily realized using scripting languages. Also, mobile devices are increasingly powerful, have ubiquitous access to the Web and feature local sensors, such as GPS. Thus, mobile applications can adapt to the mobile user's current situation. We examine how context-aware mashups can be created. One challenge is the provisioning of context data to the mobile application. For this, we discuss different ways to integrate context data, such as the user's position, into web applications. Moreover, we assess different data formats and the overall performance. Finally, we present the Telar Mashup Platform, a client-server solution for location-based mashups for mobile devices such as the Nokia N810 Internet Tablet.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-50&engl=0}
}
@inproceedings {INPROC-2008-49,
   author = {Matthias Grossmann and Nicola H{\"o}nle and Daniela Nicklas and Bernhard Mitschang},
   title = {{Reference Management in a Loosely Coupled, Distributed Information System}},
   booktitle = {Proceedings of the 12th East-European Conference on Advances in Databases and Information Systems},
   editor = {Paolo Atzeni and Albertas Caplinskas and Hannu Jaakkola},
   publisher = {Springer-Verlag},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   series = {Lecture Notes in Computer Science},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2008},
   language = {Englisch},
   cr-category = {E.2 Data Storage Representations,     H.2.2 Database Management Physical Design,     H.2.4 Database Management Systems},
   contact = {Matthias Grossmann matthias.grossmann@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {References between objects in loosely coupled distributed information systems pose a problem. On the one hand, one tries to avoid referential inconsistencies like, e.g., dangling links in the WWW. On the other hand, using strict constraints as in databases may restrict the data providers severely. We present the solution to this problem that we developed for the Nexus system. The approach tolerates referential inconsistencies in the data while providing consistent query answers to users. For traversing references, we present a concept based on return references. This concept is especially suitable for infrequent object migrations and provides a good query performance. For scenarios where object migrations are frequent, we developed an alternative concept based on a distributed hash table.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-49&engl=0}
}
@inproceedings {INPROC-2008-32,
   author = {Sylvia Radesch{\"u}tz and Florian Niedermann and Bernhard Mitschang},
   title = {{Ein Annotationsansatz zur Unterst{\"u}tzung einer ganzheitlichen Gesch{\"a}ftsanalyse}},
   booktitle = {Proc. of the 5th Conference on Data Warehousing: Synergien durch Integration und Informationslogistik. (DW2008); St. Gallen, 27.-28. Oktober, 2008},
   publisher = {Lecture Notes in Informatics (LNI)},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--19},
   type = {Konferenz-Beitrag},
   month = {Oktober},
   year = {2008},
   language = {Deutsch},
   cr-category = {D.2.2 Software Engineering Design Tools and Techniques,     D.2.12 Software Engineering Interoperability,     H.4.1 Office Automation,     H.5.2 Information Interfaces and Presentation User Interfaces},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Improving a company's business processes plays an increasingly important role in gaining competitive advantages over the competition. This requires a comprehensive analysis of all information available in the company. Current approaches concentrate either on the analysis of process data or on the analysis of operational application data, which typically resides in a data warehouse. For a more in-depth analysis, however, it is necessary to link process data and operational data. This paper presents two approaches that make it possible to combine these data effectively and flexibly. The first approach establishes a direct link between entities from the process data and entities from the operational data. The link in the second approach is instead based on a semantic description of the data. Both methods are realized in a tool.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-32&engl=0}
}
@inproceedings {INPROC-2008-05,
   author = {Sylvia Radesch{\"u}tz and Bernhard Mitschang and Frank Leymann},
   title = {{Matching of Process Data and Operational Data for a Deep Business Analysis}},
   booktitle = {Proc. of the 4th International Conference on Interoperability for Enterprise Software and Applications (I-ESA 2008), Berlin, M{\"a}rz 26-28, 2008.},
   address = {London},
   publisher = {Springer-Verlag},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {171--182},
   type = {Konferenz-Beitrag},
   month = {M{\"a}rz},
   year = {2008},
   doi = {10.1007/978-1-84800-221-0_14},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   ee = {http://www.aidima.es/iesa2008/},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen;     Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Efficient adaptation to new situations of a company's business and its business processes plays an important role in achieving competitive advantages over other companies. For an optimization of processes, a profound analysis of all relevant information in the company is necessary. Analyses typically specialize either in process analysis or in data warehousing of operational data. A consolidation of business data is needed, i.e., of internal process execution data and external operational data, in order to allow for interoperability between these major business data sources and to analyze and optimize processes in a much more comprehensive scope. This paper introduces a framework that offers various data descriptions to reach an efficient matching of process data and operational data, and shows its advantages compared to separate analyses and other matching approaches.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-05&engl=0}
}
@inproceedings {INPROC-2008-02,
   author = {Marko Vrhovnik and Holger Schwarz and Sylvia Radesch{\"u}tz and Bernhard Mitschang},
   title = {{An Overview of SQL Support in Workflow Products}},
   booktitle = {Proc. of the 24th International Conference on Data Engineering (ICDE 2008), Canc{\'u}n, M{\'e}xico, April 7-12, 2008},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--8},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2008},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Over the last years, data management products as well as workflow products have established themselves as indispensable building blocks for advanced IT systems in almost all application areas. Recently, many vendors have created innovative product extensions that combine service-oriented frameworks with powerful workflow and data management capabilities. In this paper, we discuss several workflow products from different vendors with a specific focus on their SQL support. We provide a comparison based on a set of important data management patterns and illustrate the characteristics of various approaches by means of a running example.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-02&engl=0}
}
@inproceedings {INPROC-2007-64,
   author = {Nazario Cipriani and Matthias Grossmann and Daniela Nicklas and Bernhard Mitschang},
   title = {{Federated Spatial Cursors}},
   booktitle = {Proceedings of the IX Brazilian Symposium on Geoinformatics},
   address = {S{\~a}o Jos{\'e} dos Campos, Brazil},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {85--96},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2007},
   isbn = {978-85-17-00036-2},
   keywords = {nexus; cursor; federation; spatial},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems,     H.3.4 Information Storage and Retrieval Systems and Software},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2007-64/INPROC-2007-64.pdf,     http://www.geoinfo.info/geoinfo2007/anais_geoinfo2007.pdf},
   contact = {Send an e-mail to Nazario.Cipriani@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The usage of small mobile devices for data-intensive applications becomes more and more self-evident. As a consequence we have to consider these devices and their inherent characteristics in future system designs, like the limitations of memory and communication bandwidth. For example, when querying data servers for information, a mobile application can hardly anticipate the size of the result set. Our approach is to give more control over the data delivery process to the application, so that it can be adapted regarding its device status, the costs and availability of communication channels, and the user’s needs. This paper introduces a flexible and scalable approach by providing spatially federated cursor functionality. It is based on an open federation over a set of loosely coupled data sources that provide simple object retrieval interfaces.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-64&engl=0}
}
@inproceedings {INPROC-2007-59,
   author = {Jing Lu and Bernhard Mitschang},
   title = {{DIS-CS: Improving Enterprise Data Integration by Constraint Service}},
   booktitle = {ISCA 20th INTERNATIONAL CONFERENCE ON COMPUTER APPLICATIONS IN INDUSTRY AND ENGINEERING, November 7-9, 2007, San Francisco, California, USA.},
   publisher = {The International Society for Computers and Their Applications},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {212--217},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2007},
   isbn = {978-1-880843-65-9},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {This paper presents an approach to providing a Constraint Service for XML-based Data Integration Systems. Constraints from local data sources and global constraints are expressed in a uniform constraint model based on Active XQuery and are stored in a Constraint Repository. We introduce the concept of a Constraint Wrapper, which automatically translates local constraints from data sources into the uniform constraint model with the help of schema mapping information. The Constraint Service can be published as a web service and invoked by different Data Integration Systems. Through the Constraint Service, queries can be optimized, and global updates can be checked for validity and integrity. We establish the DIS-CS system to facilitate both the architecture and the implementation. We carry out experiments on Semantic Query Optimization using Constraints. The results show that query performance is greatly improved.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-59&engl=0}
}
@inproceedings {INPROC-2007-29,
   author = {Rodrigo Monteiro and Geraldo Zimbrao and Holger Schwarz and Bernhard Mitschang and Jano Souza},
   title = {{DWFIST: Leveraging Calendar-based Pattern Mining in Data Streams}},
   booktitle = {Proc. of the 9th International Conference on Data Warehousing and Knowledge Discovery (DaWaK 2007) Regensburg, Germany, 3-7 September, 2007},
   publisher = {-},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {438--448},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2007},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Calendar-based pattern mining aims at identifying patterns on specific calendar partitions. Potential calendar partitions are for example: every Monday, every first working day of each month, every holiday. Providing flexible mining capabilities for calendar-based partitions is especially challenging in a data stream scenario. The calendar partitions of interest are not known a priori and at each point in time only a subset of the detailed data is available. We show how a data warehouse approach can be applied to this problem. The data warehouse that keeps track of frequent itemsets holding on different partitions of the original stream has low storage requirements. Nevertheless, it allows to derive sets of patterns that are complete and precise. This work demonstrates the effectiveness of our approach by a series of experiments.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-29&engl=0}
}
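To make the notion of calendar-based partitions from the abstract above concrete, the following Python sketch maps each stream timestamp to the partitions it belongs to and maintains per-partition itemset counts. The partition labels, helper names and holiday handling are illustrative assumptions, not the DWFIST implementation.

from collections import Counter, defaultdict
from datetime import date

def calendar_partitions(d, holidays=frozenset()):
    """Return the calendar partition labels a given day falls into."""
    parts = ["weekday:" + d.strftime("%A"), "month:" + d.strftime("%B")]
    if d in holidays:
        parts.append("holiday")
    if d.weekday() < 5 and all(date(d.year, d.month, i).weekday() >= 5 or
                               date(d.year, d.month, i) in holidays
                               for i in range(1, d.day)):
        parts.append("first-working-day-of-month")
    return parts

counts = defaultdict(Counter)   # partition label -> itemset -> support count

def observe(day, itemset, holidays=frozenset()):
    """Register one transaction's itemset under every partition of its day."""
    for part in calendar_partitions(day, holidays):
        counts[part][frozenset(itemset)] += 1

observe(date(2007, 9, 3), {"a", "b"})    # a Monday
observe(date(2007, 9, 10), {"a", "b"})   # another Monday
print(counts["weekday:Monday"][frozenset({"a", "b"})])   # -> 2

Keeping only such per-partition aggregates, rather than the raw transactions, is what allows partitions of interest to be combined later without revisiting the original stream.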
@inproceedings {INPROC-2007-28,
   author = {Marko Vrhovnik and Holger Schwarz and Oliver Suhre and Bernhard Mitschang and Volker Markl and Albert Maier and Tobias Kraft},
   title = {{An Approach to Optimize Data Processing in Business Processes}},
   booktitle = {Proc. of the 33rd International Conference on Very Large Data Bases (VLDB 2007), Vienna, Austria, September 23-28, 2007},
   publisher = {-},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--12},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2007},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {In order to optimize their revenues and profits, an increasing number of businesses organize their business activities in terms of business processes. Typically, they automate important business tasks by orchestrating a number of applications and data stores. Obviously, the performance of a business process is directly dependent on the efficiency of data access, data processing, and data management. In this paper, we propose a framework for the optimization of data processing in business processes. We introduce a set of rewrite rules that transform a business process in such a way that an improved execution with respect to data management can be achieved without changing the semantics of the original process. These rewrite rules are based on a semi-procedural process graph model that externalizes data dependencies as well as control flow dependencies of a business process. Furthermore, we present a multi-stage control strategy for the optimization process. We illustrate the benefits and opportunities of our approach through a prototype implementation. Our experimental results demonstrate that independent of the underlying database system performance gains of orders of magnitude are achievable by reasoning about data and control in a unified framework.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-28&engl=0}
}
@inproceedings {INPROC-2007-125,
   author = {Nazario Cipriani and Matthias Gro{\ss}mann and Daniela Nicklas and Bernhard Mitschang},
   title = {{Federated Spatial Cursors}},
   booktitle = {IX Brazilian Symposium on Geoinformatics, 25-28 November, Campos do Jordao, Sao Paulo, Brazil},
   publisher = {INPE},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {85--96},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2007},
   language = {Englisch},
   cr-category = {C.2.4 Distributed Systems,     H.2.4 Database Management Systems,     H.3.3 Information Search and Retrieval},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The usage of small mobile devices for data-intensive applications becomes more and more self-evident. As a consequence we have to consider these devices and their inherent characteristics in future system designs, like the limitations of memory and communication bandwidth. For example, when querying data servers for information, a mobile application can hardly anticipate the size of the result set. Our approach is to give more control over the data delivery process to the application, so that it can be adapted regarding its device status, the costs and availability of communication channels, and the user’s needs. This paper introduces a flexible and scalable approach by providing spatially federated cursor functionality. It is based on an open federation over a set of loosely coupled data sources that provide simple object retrieval interfaces.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-125&engl=0}
}
@inproceedings {INPROC-2007-108,
   author = {Ralf Wagner and Bernhard Mitschang},
   title = {{A Methodology and Guide for Effective Reuse in Integration Architectures for Enterprise Applications}},
   booktitle = {Distributed Objects and Applications (DOA) 2007 International Conference},
   publisher = {.},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {522--539},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2007},
   language = {Englisch},
   cr-category = {A.0 General Literature, General},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-108&engl=0}
}
@inproceedings {INPROC-2007-107,
   author = {Ralf Wagner and Bernhard Mitschang},
   title = {{Flexible Reuse of Middleware Infrastructures in Heterogeneous IT Environments}},
   booktitle = {Proceedings of the IEEE International Conference on Information Reuse and Integration},
   publisher = {.},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {323--328},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {2007},
   language = {Englisch},
   cr-category = {A.0 General Literature, General},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-107&engl=0}
}
@inproceedings {INPROC-2007-106,
   author = {Ralf Wagner and Bernhard Mitschang},
   title = {{A Virtualization Approach for Reusing Middleware Adapters}},
   booktitle = {Proceedings of the Ninth International Conference on Enterprise Information Systems},
   publisher = {.},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {78--85},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2007},
   language = {Englisch},
   cr-category = {A.0 General Literature, General},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-106&engl=0}
}
@inproceedings {INPROC-2007-105,
   author = {Tobias Kraft and Holger Schwarz and Bernhard Mitschang},
   title = {{A Statistics Propagation Approach to Enable Cost-Based Optimization of Statement Sequences}},
   booktitle = {Proc. of the 11th East European Conference on Advances in Databases and Information Systems (ADBIS 2007), Varna, Bulgaria, September 29 - October 3, 2007},
   publisher = {-},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {267--282},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2007},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-105&engl=0}
}
@inproceedings {INPROC-2007-104,
   author = {Tobias Kraft and Bernhard Mitschang},
   title = {{Statistics API: DBMS-Independent Access and Management of DBMS Statistics in Heterogeneous Environments}},
   booktitle = {Proc. of the 9th International Conference on Enterprise Information Systems (ICEIS 2007), Funchal, Madeira, Portugal, June 12-16, 2007},
   publisher = {-},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {5--12},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2007},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-104&engl=0}
}
@inproceedings {INPROC-2007-03,
   author = {Clemens Dorda and Uwe Heinkel and Bernhard Mitschang},
   title = {{Improving Application Integration with Model-Driven Engineering}},
   booktitle = {Proceedings of International Conference on Information Technology and Management 2007 : ICITM 2007; Hong Kong, China, January 3-5, 2007},
   editor = {Chan Man-Chung and James N.K. Liu and Ronnie Cheung and Joe Zhou},
   address = {Hong Kong},
   publisher = {ISM Press},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {94--101},
   type = {Konferenz-Beitrag},
   month = {Januar},
   year = {2007},
   isbn = {988-97311-5-0},
   keywords = {Enterprise Application Integration, Model-Driven Engineering, Software Lifecycle, EAI, MDA, MDE, UML, Unified Modeling Language},
   language = {Englisch},
   cr-category = {D.2.2 Software Engineering Design Tools and Techniques,     D.2.13 Software Engineering Reusable Software,     I.6.5 Model Development},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2007-03/INPROC-2007-03.pdf},
   contact = {Write message to Clemens.Dorda@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Modern software for Enterprise Application Integration (EAI) provides tools for modeling integration scenarios. A drawback of these tools is the missing functionality to exchange or integrate models of different EAI products. Consequently, developers can describe real heterogeneous IT environments only partially. Our goal is to avoid the creation of these so-called ‘integration islands’. For that purpose we present an approach which introduces an abstract view by technology-independent and multivendor-capable modeling for both development and maintenance. With this approach, we propose a toolset- and repository-based refinement of the abstract view to automate the implementation with real products and the deployment on real platforms.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-03&engl=0}
}
@inproceedings {INPROC-2006-56,
   author = {Christoph Mangold and Holger Schwarz and Bernhard Mitschang},
   title = {{Symbiosis in the Intranet: How Document Retrieval Benefits from Database Information}},
   booktitle = {13th International Conference on Management of Data (COMAD 2006), December 14-16, 2006, Delhi, India},
   editor = {L. V. S. Lakshmanan and P. Roy and A. K. H. Tung},
   address = {New Delhi},
   publisher = {Tata McGraw-Hill Publishing Company Limited},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {201--204},
   type = {Konferenz-Beitrag},
   month = {Dezember},
   year = {2006},
   isbn = {0-07-063374-6},
   language = {Englisch},
   cr-category = {H.3.3 Information Search and Retrieval},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2006-56/INPROC-2006-56.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The enterprise information space is split in two hemispheres. Documents contain unstructured or semistructured information; structured information is stored in databases. As regards the content, both kinds of information are complementary parts. However, enterprise information systems usually focus on one part, only. Our approach improves document retrieval in the intranet by exploiting the enterprise's databases. In particular, we exploit database information to describe the context of documents and exploit this context to enhance common full text search. In this paper, we show how to model and compute document context and present results on runtime performance},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2006-56&engl=0}
}
@inproceedings {INPROC-2006-54,
   author = {Thomas Schwarz and Matthias Grossmann and Daniela Nicklas and Bernhard Mitschang},
   title = {{Exploiting Type and Space in a Main Memory Query Engine}},
   booktitle = {Proceedings of the VIII Brazilian Symposium on GeoInformatics : GeoInfo2006 ; Campos do Jord{\~a}o, Brazil, November 19-22, 2006},
   editor = {Clodoveu Augusto Davis Junior and Antonio Miguel Vieira Monteiro},
   publisher = {INPE},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {35--52},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2006},
   isbn = {978-85-17-00027-0},
   keywords = {Main Memory Query Engine, Indexing, Spatial Index, Type Hierarchies, Deployable Query Engine},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems,     H.3.1 Content Analysis and Indexing,     H.2.8 Database Applications},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2006-54/INPROC-2006-54.pdf},
   contact = {Thomas Schwarz schwarts@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {More and more spatial data is accessible over the web or through portals of wireless service providers. In this context the main selection criteria for the data are the type of the requested data objects and their position in the real world. Integration and performance issues are challenged by the need to process ad hoc queries in an interactive fashion. In this paper we investigate how a main memory query engine can be used to meet these requirements. It has the added benefit of being easily deployable to many components in a large-scale data integration system. Hence, we analyze how such a query engine can best exploit the query characteristics by employing an index structure that leverages spatial and type dimensions. In order to support query processing in the best possible way we investigate a specific multi-dimensional main memory index structure. Compared to the straightforward approach using separate indexes on type and position we can increase the performance by up to almost an order of magnitude in several important usage scenarios. This requires tweaking the mapping of type IDs to values in the type dimension, which we discuss extensively. This enables the overall system to be used interactively, even with large data sets.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2006-54&engl=0}
}
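The key idea mentioned in the abstract above is mapping type IDs to values in a type dimension so that type hierarchies can be combined with spatial predicates. A minimal sketch of one such mapping follows; the preorder interval numbering, data layout and brute-force filter are assumptions chosen for illustration, not the paper's index structure.

def number_types(hierarchy, root):
    """Assign each type a preorder interval over its subtree, so 'is of type T
    or a subtype of T' becomes a numeric range predicate."""
    intervals, counter = {}, [0]
    def visit(t):
        low = counter[0]
        counter[0] += 1
        for child in hierarchy.get(t, []):
            visit(child)
        intervals[t] = (low, counter[0] - 1)
    visit(root)
    return intervals

hierarchy = {"object": ["building", "road"], "building": ["hotel", "museum"]}
iv = number_types(hierarchy, "object")

objects = [("hotel", (9.18, 48.78)), ("road", (9.20, 48.80)), ("museum", (9.17, 48.77))]

def query(type_name, bbox):
    """Combined predicate: type-range check plus bounding-box check."""
    low, high = iv[type_name]
    (x1, y1), (x2, y2) = bbox
    return [(t, p) for t, p in objects
            if low <= iv[t][0] <= high and x1 <= p[0] <= x2 and y1 <= p[1] <= y2]

print(query("building", ((9.15, 48.75), (9.19, 48.79))))   # hotel and museum match

A real multi-dimensional main memory index would store (type value, x, y) keys in one structure instead of scanning a list, but the interval encoding is what lets both criteria share that structure.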
@inproceedings {INPROC-2006-52,
   author = {Christoph Mangold and Holger Schwarz and Bernhard Mitschang},
   title = {{u38: A Framework for Database-Supported Enterprise Document-Retrieval}},
   booktitle = {Proceedings of the Tenth International Database Engineering \& Apllications Symposium (IDEAS2006), Delhi, India, December 11-14, 2006},
   publisher = {IEEE Computer Society},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--8},
   type = {Konferenz-Beitrag},
   month = {Dezember},
   year = {2006},
   language = {Englisch},
   cr-category = {H.3.3 Information Search and Retrieval},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {In enterprises, information is encoded in documents and databases. Logically, the information in both worlds is tightly connected; however, on the system level there is usually a large gap. In this paper, we propose a framework that improves document retrieval by exploiting available enterprise databases. In particular, we use database information to model the context of documents and incorporate this context in our search framework. We present our framework architecture, its components and its major interfaces. The framework can be configured and enhanced at well-defined points and, hence, can easily be customized to other domains. We furthermore evaluate its core components. Our experiments show that the context-aware approach significantly improves the quality of search results.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2006-52&engl=0}
}
@inproceedings {INPROC-2006-49,
   author = {Mih{\'a}ly Jakob and Holger Schwarz and Fabian Kaiser and Bernhard Mitschang},
   title = {{Towards an operation model for generated web applications}},
   booktitle = {Workshop proceedings of the sixth international conference on Web engineering (MDWE 2006); Palo Alto, California, USA, July 2006},
   editor = {Association for Computing Machinery (ACM)},
   address = {New York},
   publisher = {ACM Press New York, NY, USA},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2006},
   language = {Englisch},
   cr-category = {D.2.3 Software Engineering Coding Tools and Techniques,     D.2.11 Software Engineering Software Architectures,     H.4 Information Systems Applications,     H.5.4 Hypertext/Hypermedia},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {This paper describes a new approach for the development of data-intensive web applications that depend on non-trivial data manipulation. E-Commerce web sites, on-line auction systems and large enterprise web portals fall into this category as they require comprehensive data access, data processing and data manipulation capabilities. However, existing methodologies mainly concentrate on modeling content, navigation and presentation aspects of read-only web sites. Approaches that consider modeling data operations incorporate them into existing models resulting in a less clear design. We argue that existing models are not sufficient to express complex operations that access or modify web application content. Therefore, we propose an additional Operation Model defining operations for data-intensive web applications. We also propose the utilization of a web application generator to create an Operation Layer based on this Operation Model.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2006-49&engl=0}
}
@inproceedings {INPROC-2006-48,
   author = {Mih{\'a}ly Jakob and Holger Schwarz and Fabian Kaiser and Bernhard Mitschang},
   title = {{Modeling and Generating Application Logic for Data-Intensive Web Applications}},
   booktitle = {Proceedings of the 6th international conference on Web engineering (ICWE2006); Palo Alto, California, USA, July 2006},
   editor = {Association for Computing Machinery (ACM)},
   address = {New York},
   publisher = {ACM Press New York, NY, USA},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {77--84},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2006},
   language = {Englisch},
   cr-category = {D.2.3 Software Engineering Coding Tools and Techniques,     D.2.11 Software Engineering Software Architectures,     H.4 Information Systems Applications,     H.5.4 Hypertext/Hypermedia},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {This paper presents a new approach for the development of data-intensive web applications that depend on sophisticated application logic. E-Commerce web sites, on-line auction systems and large enterprise web portals fall into this category as they require comprehensive data access, data processing and data manipulation capabilities. However, existing methodologies mainly concentrate on modeling content, navigation and presentation aspects of read-only web sites. In our opinion these models are not sufficient to express complex operations that access or modify web application content. Therefore, we propose an additional Operation Model defining the application logic of a web application. We show that based on this model a significant part of a web application’s Operation Layer can be generated, still allowing the manual implementation of arbitrary additional functionality. We evaluate our approach and present experimental results based on a large example application for the area of innovation management.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2006-48&engl=0}
}
@inproceedings {INPROC-2005-84,
   author = {Uwe Heinkel and Carmen Constantinescu and Bernhard Mitschang},
   title = {{Integrating Data Changes with Data from Data Service Providers}},
   booktitle = {Proceedings of the 18th International Conference on Computer Applications in Industry and Engineering (CAINE 2005)},
   publisher = {ICSA},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {146--151},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2005},
   language = {Englisch},
   cr-category = {H.2.5 Heterogeneous Databases},
   contact = {Uwe.Heinkel@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Nowadays, enterprises are increasingly confronted with fast-changing and turbulent markets. In order to remain competitive, they have to adapt quickly to these new situations. The integration solutions employed in such an environment enable the required agility by using a loose and flexible integration architecture. We propose an integration solution based on the concept of propagating data changes from one information system to the affected information systems. This paper focuses on the question of how data from other data services can be accessed and exploited to enhance a data change propagation system. These services are based on a service-oriented architecture (SOA) using XML technologies (e.g. SOAP). We developed the Data Service Description Language (DSDL) to define the data structures of the data service. This description is used to create the above-mentioned Transformation Scripts as well as to generate requests for accessing remote data. A layered system approach is introduced to facilitate both the architecture and its implementation.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-84&engl=0}
}
@inproceedings {INPROC-2005-57,
   author = {Christoph Mangold and Holger Schwarz and Bernhard Mitschang},
   title = {{Improving Intranet Search Engines Using Context Information from Databases}},
   booktitle = {Proceedings of the 14th ACM International Conference on Information and Knowledge Management (CIKM 2005), Bremen, Germany, October 31 - November 5, 2005},
   editor = {A. Chowdhury and N. Fuhr and M. Ronthaler and H.-J. Schek and W. Teiken},
   publisher = {ACM Press},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {349--350},
   type = {Konferenz-Beitrag},
   month = {Oktober},
   year = {2005},
   isbn = {1-59593-140-6},
   language = {Englisch},
   cr-category = {H.3.3 Information Search and Retrieval},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2005-57/INPROC-2005-57.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Information in enterprises comes in documents and databases. From a semantic viewpoint, both kinds of information are usually tightly connected. In this paper, we propose to enhance common search-engines with contextual information retrieved from databases. We establish system requirements and anecdotally demonstrate how documents and database information can be represented as the nodes of a graph. Then, we give an example how we exploit this graph information for document retrieval.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-57&engl=0}
}
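As a toy illustration of the graph idea sketched in the abstract above, documents and database records can be treated as linked nodes, and a document's full-text score can be boosted by query matches in its linked records. The scoring formula, the alpha weight and all example data below are invented for this sketch and are not the authors' method.

# Documents, database records, and links between them form one graph.
docs = {"spec.pdf": "gearbox assembly torque values",
        "memo.txt": "meeting notes on budget"}
records = {"part:4711": "gearbox housing", "cust:12": "ACME budget contract"}
links = {"spec.pdf": {"part:4711"}, "memo.txt": {"cust:12"}}   # document -> records

def score(doc, query, alpha=0.5):
    """Plain term overlap on the document text plus a weighted bonus for
    query terms found in database records linked to the document."""
    terms = set(query.lower().split())
    text_hits = len(terms & set(docs[doc].lower().split()))
    context_hits = sum(len(terms & set(records[r].lower().split()))
                       for r in links.get(doc, ()))
    return text_hits + alpha * context_hits

query = "gearbox torque"
ranked = sorted(docs, key=lambda d: score(d, query), reverse=True)
print(ranked)   # 'spec.pdf' first: it matches the text and its linked part record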
@inproceedings {INPROC-2005-33,
   author = {Carmen Constantinescu and Uwe Heinkel and Jan Le Blond and Stephan Schreiber and Bernhard Mitschang and Engelbert Westk{\"a}mper},
   title = {{Flexible Integration of Layout Planning and Adaptive Assembly Systems in Digital Enterprises}},
   booktitle = {Proceedings of the 38th CIRP International Seiminar on Manufacturing Systems (CIRP ISMS)},
   publisher = {CIRP},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {10--18},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2005},
   language = {Englisch},
   cr-category = {J.6 Computer-Aided Engineering,     C.2.4 Distributed Systems},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2005-33/INPROC-2005-33.pdf},
   contact = {Uwe.Heinkel@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The agility and adaptability of an enterprise are more and more a desired feature and a key success factor. Manufacturing enterprises in particular have to be able to respond quickly to both external and internal changes. This includes the ability to plan the optimal factory configuration and the assembly processes with little effort and in a short time. The tools of the Digital Factory support manufacturing enterprises in shortening planning time and increasing planning quality. As a motivation for our work in the field of Enterprise Application Integration, we describe a scenario that reveals how a transformable manufacturing enterprise reacts to market changes with the support of digital tools. Motivated by this scenario, we developed an integration solution called the Stuttgart Integration Platform. A central role in our approach is played by a Digital Factory solution, which stores all needed information about the planned facility layouts and assembly processes. The paper presents our central integration solution Champagne and three integrated systems: the Digital Factory Solution, the Factory Planning Table, and the Assembly Configuration Tool.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-33&engl=0}
}
@inproceedings {INPROC-2005-17,
   author = {Rodrigo Salvador Monteiro and Geraldo Zimbrao and Holger Schwarz and Bernhard Mitschang and Jano Moreira De Souza},
   title = {{Building the Data Warehouse of Frequent Itemsets in the DWFIST Approach}},
   booktitle = {Proceedings of the 15th International Symposium on Methodologies for Intelligent Systems Saratoga Springs, New York - May 25-28, 2005},
   publisher = {Springer},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--9},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2005},
   isbn = {3-540-25878-7},
   language = {Englisch},
   cr-category = {H.2.7 Database Administration,     H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Some data mining tasks can produce such great amounts of data that we have to cope with a new knowledge management problem. Frequent itemset mining fits in this category. Different approaches have been proposed to handle or somehow avoid this problem. All of them have problems and limitations. In particular, most of them need the original data during the analysis phase, which is not feasible for data streams. The DWFIST (Data Warehouse of Frequent ItemSets Tactics) approach aims at providing a powerful environment for the analysis of itemsets and derived patterns, such as association rules, without accessing the original data during the analysis phase. This approach is based on a Data Warehouse of Frequent Itemsets. It provides frequent itemsets in a flexible and efficient way as well as a standardized logical view upon which analytical tools can be developed. This paper presents how such a data warehouse can be built.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-17&engl=0}
}
@inproceedings {INPROC-2005-111,
   author = {Albert Maier and Bernhard Mitschang and Frank Leymann and Dan Wolfson},
   title = {{On Combining Business Process Integration and ETL Technologies}},
   booktitle = {Datenbanksysteme in Business, Technologie und Web (BTW'05)},
   editor = {Gesellschaft f{\"u}r Informatik},
   publisher = {K{\"o}llen},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {533--546},
   type = {Konferenz-Beitrag},
   month = {M{\"a}rz},
   year = {2005},
   isbn = {3-88579-394-6},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications,     H.3.3 Information Search and Retrieval},
   ee = {http://btw2005.aifb.uni-karlsruhe.de/},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen, Architektur von Anwendungssystemen;     Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {On Combining Business Process Integration and ETL Technologies},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-111&engl=0}
}
@inproceedings {INPROC-2005-110,
   author = {Cataldo Mega and Frank Wagner and Bernhard Mitschang},
   title = {{From Content Management to Enterprise Content Management}},
   booktitle = {Datenbanksysteme in Business, Technologie und Web},
   editor = {Gesellschaft f{\"u}r Informatik},
   publisher = {K{\"o}llen},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {596--613},
   type = {Konferenz-Beitrag},
   month = {M{\"a}rz},
   year = {2005},
   isbn = {3-88579-394-6},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications,     H.3.3 Information Search and Retrieval},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {In this paper we will provide a step-by-step description of what it means to evolve the architecture of a traditional content management system into an Enterprise Content Management System (ECMS), explain the differences between the two systems, and motivate why this transformation is necessary. By analyzing business scenarios in the realm of different content management domains, we will explain why today's content management systems struggle when it comes to satisfying the need for performance, scalability, and business resilience. Using the system design of IBM DB2 Content Manager as a reference point, we will outline and discuss some of the new key technical challenges found when promoting on-demand ECM services and look at their affordability. By detailing a few representative use cases we will perform a problem analysis, and an attempt will be made to present an enhanced ECM system design that makes use of a component ``virtualization'' approach in order to allow for a dynamic services infrastructure to be set up and which capitalizes on proven peer-to-peer and grid technology.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-110&engl=0}
}
@inproceedings {INPROC-2005-06,
   author = {Mih{\'a}ly Jakob and Matthias Grossmann and Daniela Nicklas and Bernhard Mitschang},
   title = {{DCbot: Finding Spatial Information on the Web}},
   booktitle = {Proceedings of the 10th International Conference on Database Systems for Advanced Applications (DASFAA 2005)},
   address = {Beijing},
   publisher = {??},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2005},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications,     H.3.3 Information Search and Retrieval,     H.5.4 Hypertext/Hypermedia},
   ee = {http://www.nexus.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The WWW provides an overwhelming amount of information which, when spatially indexed, can be a valuable additional data source for location-based applications. By manually building a spatial index, only a fraction of the available resources can be covered. This paper introduces a system for the automatic mapping of web pages to geographical locations. Our web robot uses several sets of domain-specific keywords, lexical context rules that are automatically learned, and a hierarchical catalogue of geographical locations that provides exact geographical coordinates for locations. Spatially indexed web pages are used to construct Geographical Web Portals, which can be accessed by different location-based applications. In addition, we present experimental results demonstrating the quantity and the quality of automatically indexed web pages.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-06&engl=0}
}
@inproceedings {INPROC-2004-42,
   author = {Thomas Schwarz and Markus Iofcea and Matthias Grossmann and Nicola H{\"o}nle and Daniela Nicklas and Bernhard Mitschang},
   title = {{On Efficiently Processing Nearest Neighbor Queries in a Loosely Coupled Set of Data Sources}},
   booktitle = {Proceedings of the 12th ACM International Symposium on Advances in Geographic Information Systems (ACM GIS 2004), Washington D.C., November 12-13, 2004},
   editor = {ACM},
   publisher = {?},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2004},
   keywords = {Data integration, distributed query processing, federated database system, kNN, nearest neighbors, parallel query processing},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems,     H.2.8 Database Applications,     H.3.3 Information Search and Retrieval},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2004-42/INPROC-2004-42.pdf},
   contact = {thomas.schwarz@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {We propose a family of algorithms for processing nearest neighbor (NN) queries in an integration middleware that provides federated access to numerous loosely coupled, autonomous data sources connected through the internet. Previous approaches for parallel and distributed NN queries considered all data sources as relevant, or determined the relevant ones in a single step by exploiting additional knowledge on object counts per data source. We propose a different approach that does not require such detailed statistics about the distribution of the data. It iteratively enlarges and shrinks the set of relevant data sources. Our experiments show that this yields considerable performance benefits with regard to both response time and effort. Additionally, we propose to use only moderate parallelism instead of querying all relevant data sources at the same time. This allows us to trade a slightly increased response time for a lot less effort, hence maximizing the cost profit ratio, as we show in our experiments. Thus, the proposed algorithms clearly extend the set of NN algorithms known so far.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2004-42&engl=0}
}
@inproceedings {INPROC-2004-41,
   author = {Daniela Nicklas and Nicola H{\"o}nle and Michael Moltenbrey and Bernhard Mitschang},
   title = {{Design and Implementation Issues for Explorative Location-based Applications: the NexusRallye}},
   booktitle = {Proceedings for the VI Brazilian Symposium on GeoInformatics: GeoInfo 2004; November 22-24, 2004},
   editor = {Gilberto Camara and Cirano Iochpe},
   address = {Sao Jose dos Campos},
   publisher = {INPE},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {167--181},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2004},
   isbn = {3-901882-20-0},
   keywords = {location-based services, context-awareness, mobile applications},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications,     H.5.3 Group and Organization Interfaces},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2004-41/INPROC-2004-41.pdf,     http://www.nexus.uni-stuttgart.de},
   contact = {danickla@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Explorative Location-based Applications (eLBA) define a new class of applications that rely on both positioning (i.e. location information) and georeferenced information in addition to a flexible and efficient system infrastructure that supports a mobile and ubiquitous usage. In this paper we first define a modeling framework for designing eLBAs that builds on tasks as a very valuable concept for system/user interaction and application structuring. In addition, we report on a system framework, the Nexus platform, that efficiently provides access to georeferenced information and positioning information. Our sample application, the NexusRallye, is used to exemplify important aspects of our solution platform and to show its advantages as compared to other approaches.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2004-41&engl=0}
}
@inproceedings {INPROC-2004-40,
   author = {Thomas Schwarz and Nicola H{\"o}nle and Matthias Grossmann and Daniela Nicklas and Bernhard Mitschang},
   title = {{Efficient Domain-Specific Information Integration in Nexus}},
   booktitle = {Proceedings of the 2004 VLDB Workshop on Information Integration on the Web : IIWeb-2004 ; Toronto, Canada, August 30, 2004},
   editor = {Hasan Davulcu and Nick Kushmerick},
   publisher = {online},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {122--127},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {2004},
   keywords = {Nexus; information integration; domain-specific},
   language = {Englisch},
   cr-category = {H.3.4 Information Storage and Retrieval Systems and Software,     H.3.3 Information Search and Retrieval},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2004-40/INPROC-2004-40.pdf,     http://cips.eas.asu.edu/iiwebfinalproceedings/52.pdf,     http://cips.eas.asu.edu/iiweb-proceedings.html},
   contact = {thomas.schwarz@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {In this paper, we present the Nexus approach to efficient domain-specific integration of many loosely coupled data sources. A so-called information maximizing mediation middleware (IMMM) has to cope with large data volumes and many queries, and at the same time achieve a tight semantic integration for the data instances. For efficiency and practicability reasons, we propose to use an extensible global schema and a limited domain-specific query language. This facilitates employing domain-specific semantic knowledge in the middleware: detect duplicates, merge multiple representations, aggregate and generalize information. Finally, we present a working prototype tailored to the domain of context-aware applications.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2004-40&engl=0}
}
@inproceedings {INPROC-2003-40,
   author = {Christoph Mangold and Bernhard Mitschang},
   title = {{Enabling a Reuse Oriented Engineering Methodology}},
   booktitle = {Proceedings of the Second IASTED International Conference on Information and Knowledge Sharing},
   editor = {Wesley Chu},
   address = {Anaheim, Calgary, Zurich},
   publisher = {ACTA Press},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {6--11},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2003},
   isbn = {0-88986-396-2},
   keywords = {Engineering Methodology; Reuse; Representation; Knowledge Engineering and Management},
   language = {Englisch},
   cr-category = {H.1.1 Systems and Information Theory,     H.1.2 User/Machine Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {In the domain of machine engineering the reuse of design data is an important but complex instrument to improve quality and shorten development time. The successful reuse of design data requires both a machine-engineering-specific methodology and support by appropriate information system technology. In this paper we introduce a machine engineering methodology that is based on several reuse stages. Our approach to support the methodology with an appropriate information system uses a simple graph-based information model where reuse stages are reflected as layers. We also show how reuse is represented in the model.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2003-40&engl=0}
}
@inproceedings {INPROC-2003-27,
   author = {Marcello Mariucci and Bernhard Mitschang},
   title = {{Extending Web Service Technology towards an Earth Observation Integration Framework}},
   booktitle = {Proceedings of the Forum Session at the First International Conference on Service Oriented Computing: ICSOC03; Trento, Italy, Dec. 15-18, 2003},
   editor = {Marco Aiello and Chris Bussler and Vincenzo D'Andrea and Jian Yang},
   address = {Trento},
   publisher = {University of Trento},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Technical Report, Information and Communication Technology, University of Trento},
   volume = {DIT-03-056},
   pages = {117--128},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2003},
   keywords = {Earth Observation; Framework; Web Services; Repository; Workflow Management},
   language = {Englisch},
   cr-category = {D.2.2 Software Engineering Design Tools and Techniques,     D.2.6 Software Engineering Programming Environments,     D.2.9 Software Engineering Management,     D.2.10 Software Engineering Design,     D.2.11 Software Engineering Software Architectures,     D.2.13 Software Engineering Reusable Software,     H.3.4 Information Storage and Retrieval Systems and Software,     H.4 Information Systems Applications},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2003-27/INPROC-2003-27.pdf,     ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2003-27/INPROC-2003-27.ps},
   contact = {mariucci@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {In this paper we describe the implementation of a service-based application integration solution for the complex domain of Earth Observation (EO) application systems. The presented approach is based on an EO integration framework. It supports the concatenation of disparate software applications to flexible EO process chains. Resulting EO services are provided to end users by means of Web Service technology. We demonstrate that current standard technology is not sufficient to dynamically publish and interactively invoke EO services over the Web. We describe necessary extensions and adaptations.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2003-27&engl=0}
}
@inproceedings {INPROC-2003-22,
   author = {Marcello Mariucci and Clemens Dorda and Bernhard Mitschang},
   title = {{Design and Implementation of a Model-driven Earth Observation Integration Framework}},
   booktitle = {Proceedings of the Fifth International Conference on Information Integration and Web-based Applications \& Services: iiWAS '03; Jakarta, Indonesia, September 15-17, 2003. Vol. 170},
   editor = {Gabriele Kotsis and Stephane Bressan and Barbara Catania and Ismail Khalil Ibrahim},
   publisher = {Oesterreichische Computer Gesellschaft},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {215--225},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2003},
   isbn = {3-85403-170-10},
   keywords = {Model-driven Architecture, Process-oriented Application Integration, Model-driven Integration Process, Repository System},
   language = {Englisch},
   cr-category = {H.3.5 Online Information Services,     J.2 Physical Sciences and Engineering,     D.2.2 Software Engineering Design Tools and Techniques},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2003-22/INPROC-2003-22.pdf,     ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2003-22/INPROC-2003-22.ps},
   contact = {For more information please contact mariucci@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {An Earth Observation (EO) integration framework is an application integration solution for supporting the development and execution of EO services. EO services are based on the intensive use of large data sets from space, and require the tight cooperation of several distributed experts. They can be characterized as highly flexible structures, which constantly need to be adapted to evolving spacecraft and processing technologies. In this paper we introduce a model-driven approach for an EO integration framework. We describe a comprehensive integration model that adequately copes with the flexible development, customization, and execution of EO services. The accurate treatment of related model instances throughout the software life cycle significantly enhances the EO service development process in terms of quality, reuse, and adaptability. We discuss technological aspects for the realization of such an integration framework, and outline our prototype implementation that is mainly built upon commercial products. We demonstrate that such a model-driven approach can be realized by employing repository technology for managing model related issues, as well as workflow and Web service technology for execution purposes.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2003-22&engl=0}
}
@inproceedings {INPROC-2003-17,
   author = {Kurt Rothermel and Dieter Fritsch and Bernhard Mitschang and Paul J. K{\"u}hn and Martin Bauer and Christian Becker and Christian Hauser and Daniela Nicklas and Steffen Volz},
   title = {{SFB 627: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme}},
   booktitle = {Proceedings Informatik 2003},
   address = {Frankfurt},
   publisher = {Gesellschaft f{\"u}r Informatik},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2003},
   keywords = {Umgebungsmodelle; kontext-bezogene Systeme; Nexus; Spatial World Models; context-aware systems},
   language = {Deutsch},
   cr-category = {C.2.4 Distributed Systems,     H.2.4 Database Management Systems,     H.2.8 Database Applications,     H.3.4 Information Storage and Retrieval Systems and Software},
   ee = {http://www.nexus.uni-stuttgart.de},
   contact = {Kurt.Rothermel@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Verteilte Systeme;     Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Kommunikationsnetze und Rechnersysteme (IKR);     Universit{\"a}t Stuttgart, Institut f{\"u}r Photogrammetrie (ifp)},
   abstract = {The goal of the Collaborative Research Center SFB 627 ``Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme'' (Spatial World Models for Mobile Context-Aware Systems) is to investigate methods and techniques for the definition, management, and use of digital environment models. Existing information spaces are interwoven with complex models of the real world and thereby enable novel applications. In particular, the class of location-based applications and current research areas such as ubiquitous computing can benefit from such environment models or are only made possible by them.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2003-17&engl=0}
}
@inproceedings {INPROC-2003-04,
   author = {Tobias Kraft and Holger Schwarz and Ralf Rantzau and Bernhard Mitschang},
   title = {{Coarse-Grained Optimization: Techniques for Rewriting SQL Statement Sequences}},
   booktitle = {Proceedings of 29th International Conference on Very Large Data Bases (VLDB 2003), Berlin, September 9-12, 2003},
   publisher = {Morgan Kaufmann},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   pages = {488--499},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2003},
   isbn = {0-12-722442-4},
   keywords = {SQL; Query Optimization; OLAP},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Relational OLAP tools and other database applications generate sequences of SQL statements that are sent to the database server as result of a single information request provided by a user. Unfortunately, these sequences cannot be processed efficiently by current database systems because they typically optimize and process each statement in isolation. We propose a practical approach for this optimization problem, called ``coarse-grained optimization,'' complementing the conventional query optimization phase. This new approach exploits the fact that statements of a sequence are correlated since they belong to the same information request. A lightweight heuristic optimizer modifies a given statement sequence using a small set of rewrite rules. Since the optimizer is part of a separate system layer, it is independent of but can be tuned to a specific underlying database system. We discuss implementation details and demonstrate that our approach leads to significant performance improvements.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2003-04&engl=0}
}
@inproceedings {INPROC-2003-03,
   author = {Bernhard Mitschang and Engelbert Westk{\"a}mper and Carmen Constantinescu and Uwe Heinkel and Benno L{\"o}ffler and Ralf Rantzau and Ralph Winkler},
   title = {{Divide et Impera: A Flexible Integration of Layout Planning and Logistics Simulation through Data Change Propagation}},
   booktitle = {Proceedings of the 36th CIRP International Seminar on Manufacturing Systems (CIRP ISMS 2003), June 03-05, 2003 Saarland University, Saarbr{\"u}cken, Germany},
   editor = {C. Weber and H. Bley and G. Hirt},
   address = {Saarbr{\"u}cken, Germany},
   publisher = {Saarland University},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   pages = {411--418},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2003},
   isbn = {3-930429-58-6},
   keywords = {integration of information systems; XML technologies; facility layout planning; logistics simulation},
   language = {Englisch},
   cr-category = {H.2.5 Heterogeneous Databases,     J.6 Computer-Aided Engineering},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2003-03/INPROC-2003-03.pdf},
   contact = {Uwe.Heinkel@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Turbulent markets lead to new challenges for today's enterprises: they have to be transformable to stay competitive. Therefore, we developed a new approach that integrates Logistics Simulation and Layout Planning to fulfil the goal of improving the production system. Our approach is based on propagation and transformation of data changes concerning the continuous adaptation tasks among the Layout Planning and Logistics Simulation systems. Instead of relying on a tightly integrated global data schema, we connect systems only as far as required by building ``bridges'' between them. The systems that participate in the integration are kept autonomous. We use several state-of-the-art XML technologies in our integration system.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2003-03&engl=0}
}
@inproceedings {INPROC-2003-01,
   author = {Christoph Mangold and Ralf Rantzau and Bernhard Mitschang},
   title = {{F{\"o}deral: Management of Engineering Data Using a Semistructured Data Model}},
   booktitle = {Proceedings of the International Conference on Enterprise Information Systems (ICEIS), Angers, France, April 2003},
   publisher = {Unknown},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2003},
   isbn = {972-98816-1-8},
   keywords = {product data management, semistructured data, integration, data modeling},
   language = {Englisch},
   cr-category = {H.2 Database Management},
   contact = {christoph.mangold@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {The F{\"o}deral system is a flexible repository for the management, integration and modeling of product data. Current systems in this domain employ object-oriented data models. Whereas this is adequate for the management of product data, it proves insufficient for integration and modeling. Present semistructured data models, however, are suited ideally for integration, but data management and also modeling is a problem. In this paper we describe our approach to narrow down the gap between structured and semistructured data models. We present the F{\"o}deral information system which employs a new semistructured data model and show how this model can be employed in the context of management, integration, and modeling of engineering data.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2003-01&engl=0}
}
@inproceedings {INPROC-2002-42,
   author = {Bernhard Mitschang},
   title = {{A Necessity for CSCW in Design - The CHAMPAGNE Approach and Experience (invited)}},
   booktitle = {Proceedings of the Seventh International Conference on Computer Supported Cooperative Work in Design (CSCWD 2002)},
   editor = {Uni Rio de Janeiro},
   address = {Rio de Janeiro},
   publisher = {The Seventh International Conference on Computer Supported Cooperative Work in Design},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--2},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2002},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   contact = {Bernhard Mitschang Bernhard.Mitschang@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Data Propagation: A Necessity for CSCW in Design - The CHAMPAGNE Approach and Experience (invited), in: The Seventh International Conference on Computer Supported Cooperative Work in Design, Rio de Janeiro, Brasil, 2002.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2002-42&engl=0}
}
@inproceedings {INPROC-2002-41,
   author = {Aiko Frank and Bernhard Mitschang},
   title = {{A customizable shared information space to support concurrent design}},
   booktitle = {Computers in Industry},
   address = {Amsterdam},
   publisher = {Elsevier Science Publishers B. V.},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   volume = {48},
   pages = {45--57},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {2002},
   language = {Englisch},
   cr-category = {H.5.3 Group and Organization Interfaces},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Sharing data is an important aspect in distributed design environments and should be supported by an underlying system. Any synchronous access to data is conflict prone. Applying concurrency control and two-phase commit is one option to be considered. But design processes also demand cooperation between the designers. Negotiation about actions on the product under design and the early exchange of preliminary results are crucial issues. Controlled data access by itself does not fulfil all the needs for cooperation. We will present a new approach that relies on a concept and system model which integrates concurrent activities by a joint information space offering flexible protocols for cooperation on the shared objects. We will describe the customizability of the protocols to effectively support different cooperative scenarios.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2002-41&engl=0}
}
@inproceedings {INPROC-2002-17,
   author = {Marcello Mariucci and Bernhard Mitschang},
   title = {{On Making RAMSES an Earth Observation Application Framework}},
   booktitle = {Proceedings of the 2nd International Conference on Information Systems and Engineering: ISE 2002; San Diego, California, July 14-18, 2002},
   editor = {Waleed W. Smari and Nordine Melab and Shu-Ching Chen},
   address = {San Diego},
   publisher = {The Society for Modeling and Simulation International (SCS)},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   series = {Simulation Series},
   volume = {34 (2)},
   pages = {67--72},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2002},
   isbn = {1-56555-251-2},
   keywords = {Frameworks for Information Technologies, Software Architectures for Information Systems, Component-Based Designs, Service-Based Approaches},
   language = {Englisch},
   cr-category = {H.4.m Information Systems Applications Miscellaneous},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2002-17/INPROC-2002-17.pdf},
   contact = {For further information, please send an email to mariucci@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {RAMSES is one of the first large-scale prototypes of an operational Earth Observation (EO) application system. It implements a complex infrastructure for the extensive support of a thematic EO application system, which focuses on the detection and monitoring of oil spills. Since EO application systems are usually built on top of a set of generic functions, this paper analyses and assesses the RAMSES infrastructure in order to form a generic EO application framework. This framework should mainly support the collaborative development and customization of emerging EO application systems by maximizing the use of already existing system facilities. Furthermore, it should support the flexible extension and rapid reconfiguration of workflows as the business changes. Results of our analyses show that the RAMSES infrastructure does not cover all requirements of an EO application framework. We therefore introduce advanced design concepts and propose a new framework architecture that structurally controls the inherent complexity of the interdisciplinary domain of EO application systems.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2002-17&engl=0}
}
@inproceedings {INPROC-2002-03,
   author = {Carmen Constantinescu and Uwe Heinkel and Ralf Rantzau and Bernhard Mitschang},
   title = {{A System for Data Change Propagation in Heterogeneous Information Systems}},
   booktitle = {Proceedings of the International Conference on Enterprise Information Systems (ICEIS), Volume I, Cuidad Real, Spain, April 2002},
   publisher = {ICEIS Press/Escola Superior de Technologia de Setubal, Portugal},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   pages = {73--80},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2002},
   keywords = {enterprise application integration; manufacturing; repository; propagation},
   language = {Englisch},
   cr-category = {H.3.4 Information Storage and Retrieval Systems and Software},
   ee = {http://www.iceis.org},
   contact = {carmen.constantinescu@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Today, it is common that enterprises manage several mostly heterogeneous information systems to supply their production and business processes with data. There is a need to exchange data between the information systems while preserving system autonomy. Hence, an integration approach that relies on a single global enterprise data schema is ruled out. This is also due to the widespread usage of legacy systems. We propose a system, called Propagation Manager, which manages dependencies between data objects stored in different information systems. A script specifying complex data transformations and other sophisticated activities, like the execution of external programs, is associated with each dependency. For example, an object update in a source system can trigger data transformations of the given source data for each destination system that depends on the object. Our system is implemented using current XML technologies. We present the architecture and processing model of our system and demonstrate the benefit of our approach by illustrating an extensive example scenario.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2002-03&engl=0}
}
@inproceedings {INPROC-2002-01,
   author = {Ralf Rantzau and Leonard Shapiro and Bernhard Mitschang and Quan Wang},
   title = {{Universal Quantification in Relational Databases: A Classification of Data and Algorithms}},
   booktitle = {Proceedings of the International Conference on Extending Database Technology (EDBT), Prague, Czech Republic, March 2002},
   publisher = {Springer},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   series = {Lecture Notes in Computer Science},
   volume = {2287},
   pages = {445--463},
   type = {Konferenz-Beitrag},
   month = {M{\"a}rz},
   year = {2002},
   isbn = {3-540-43324-4},
   keywords = {query processing; relational division; physical operators},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2002-01/INPROC-2002-01.ps,     http://www.springer.de/comp/lncs/index.html},
   contact = {rrantzau@acm.org},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Queries containing universal quantification are used in many applications, including business intelligence applications. Several algorithms have been proposed to implement universal quantification efficiently. These algorithms are presented in an isolated manner in the research literature - typically, no relationships are shown between them. Furthermore, each of these algorithms claims to be superior to others, but in fact each algorithm has optimal performance only for certain types of input data. In this paper, we present a comprehensive survey of the structure and performance of algorithms for universal quantification. We introduce a framework for classifying all possible kinds of input data for universal quantification. Then we go on to identify the most efficient algorithm for each such class. One of the input data classes has not been covered so far. For this class, we propose several new algorithms. For the first time, we are able to identify the optimal algorithm to use for any given input dataset. These two classifications of input data and optimal algorithms are important for query optimization. They allow a query optimizer to make the best selection when optimizing at intermediate steps for the quantification problem.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2002-01&engl=0}
}
@inproceedings {INPROC-2001-70,
   author = {Albrecht Messner and Bernhard Mitschang},
   title = {{Leistungsbewertung f{\"u}r Application Server Technologie: ein parametrisierbarer Benchmark-Ansatz am Beispiel des Brokat Twister Application Servers.}},
   booktitle = {GI Jahrestagung (2) 2001. Bd. 2},
   editor = {Gesellschaft f{\"u}r Informatik e.V.},
   address = {Wien},
   publisher = {Gesellschaft f{\"u}r Informatik e.V.},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {909--915},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2001},
   isbn = {3-85403-157-2},
   language = {Deutsch},
   cr-category = {H.2.2 Database Management Physical Design},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {E-Commerce: Systemunterst{\"u}tzung f{\"u}r den Umgang mit Daten und Prozessen in vernetzten Anwendungsumgebungen. Leistungsbewertung f{\"u}r Application Server Technologie: ein parametrisierbarer Benchmark-Ansatz am Beispiel des Brokat Twister Application Servers.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-70&engl=0}
}
@inproceedings {INPROC-2001-44,
   author = {Daniela Nicklas and Christoph Pfisterer and Bernhard Mitschang},
   title = {{Towards Location-based Games}},
   booktitle = {Proceedings of the International Conference on Applications and Development of Computer Games in the 21st Century: ADCOG 21; Hongkong Special Administrative Region, China, November 22-23 2001},
   editor = {Alfred Loo Wai Sing and Wan Hak Man and Wong Wai and Cyril Tse Ning},
   address = {Hong Kong},
   publisher = {Division of Computer Studies, City University of Hong Kong, Hong Kong SAR, China},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {61--67},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2001},
   isbn = {924-442-199-4},
   keywords = {Nexus; Location-based Services; Games; Augmented World Model; Nexus Applications},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     H.3.5 Online Information Services,     K.8 Personal Computing},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2001-44/INPROC-2001-44.pdf,     ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2001-44/INPROC-2001-44.ps,     http://www.nexus.uni-stuttgart.de},
   contact = {Daniela Nicklas danickla@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {In this paper we investigate the basic properties of location-based games. This new type of game is made possible by the recent advances of mobile computing hardware and infrastructure. Players act not by pressing buttons or moving pawns on a board, but by moving around themselves in the real world. We present a simple classification of location-based games, and show how these games can be designed and implemented. With some adaptations, game concepts from existing board and computer games can be mapped to make location-based games more interesting and fun to play. Our methods are demonstrated with three actual game examples. Further, common infrastructure requirements are deduced and we show how the open platform developed by the neXus working group fulfills them.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-44&engl=0}
}
@inproceedings {INPROC-2001-43,
   author = {Daniela Nicklas and Bernhard Mitschang},
   title = {{The Nexus Augmented World Model: An Extensible Approach for Mobile, Spatially-Aware Applications}},
   booktitle = {Proceedings of the 7th International Conference on Object-Oriented Information Systems : OOIS '01 ; Calgary, Canada, August 27-29, 2001},
   editor = {Yingxu Wang and Shushma Patel and Ronald Johnston},
   address = {London},
   publisher = {Springer-Verlag},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {392--401},
   type = {Konferenz-Beitrag},
   month = {Januar},
   year = {2001},
   isbn = {1-85233-546-7},
   keywords = {Nexus; location-based services; location-aware; augmented world model},
   language = {Englisch},
   cr-category = {H.2.1 Database Management Logical Design,     H.2.8 Database Applications,     H.3.5 Online Information Services},
   ee = {http://www.nexus.uni-stuttgart.de/},
   contact = {Daniela Nicklas daniela.nicklas@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {What would the World Wide Web look like if it were made for location-based information? And how would mobile, spatially aware applications deal with such a platform? In this paper we present the neXus Augmented World Model, an object-oriented data model which plays a major role in an open framework for both providers of location-based information and new kinds of applications: the neXus platform. We illustrate the usability of the model with several sample applications and show the extensibility of this framework. Finally, we present a stepwise approach for building spatially aware applications in this environment.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-43&engl=0}
}
@inproceedings {INPROC-2001-41,
   author = {Daniela Nicklas and Matthias Gro{\ss}mann and Thomas Schwarz and Steffen Volz and Bernhard Mitschang},
   title = {{A Model-Based, Open Architecture for Mobile, Spatially Aware Applications}},
   booktitle = {Proceedings of the 7th International Symposium on Spatial and Temporal Databases: SSTD 2001; Redondo Beach, CA, USA, July 12-15, 2001},
   editor = {Christian S. Jensen and Markus Schneider and Bernhard Seeger and Vassilis J. Tsotras},
   address = {Berlin, Heidelberg, New York},
   publisher = {Springer-Verlag},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   series = {Lecture Notes in Computer Science},
   volume = {2121},
   pages = {117--135},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2001},
   isbn = {3-540-42301-X},
   keywords = {Mobile Computing; Location-Aware Applications; Augmented World Model},
   language = {Englisch},
   cr-category = {H.2.1 Database Management Logical Design,     H.3.4 Information Storage and Retrieval Systems and Software,     H.3.5 Online Information Services},
   ee = {http://www.nexus.uni-stuttgart.de},
   contact = {daniela.nicklas@informatik.uni-stuttgart.de, matthias.grossmann@informatik.uni-stuttgart.de, thomas.schwarz@informatik.uni-stuttgart.de, steffen.volz@ifp.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Photogrammetrie (ifp)},
   abstract = {With the emerging availability of small and portable devices that are able to determine their position and to communicate wirelessly, mobile and spatially aware applications become feasible. These applications rely on information that is bound to locations. In this paper we present Nexus, a platform for such applications, which is open for both new applications and new information providers, similar to the World Wide Web. Distributed servers provide location-based information, which is federated to an integrated view for the applications. To achieve this goal, we present the concept of the Augmented World Model, which is a common data model for location-based information. We give an example to show how applications can use this platform.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-41&engl=0}
}
@inproceedings {INPROC-2001-40,
   author = {Jochen R{\"u}tschlin and G{\"u}nter Sauter and J{\"u}rgen Sellentin and Klaudia Hergula and Bernhard Mitschang},
   title = {{Komponenten-Middleware: Der n{\"a}chste Schritt zur Interoperabilit{\"a}t von IT-Systemen}},
   booktitle = {Tagungsband der 9. GI-Fachtagung ``Datenbanksysteme in B{\"u}ro, Technik und Wissenschaft'' (BTW 2001), 7.-9. M{\"a}rz 2001, Oldenburg},
   editor = {Andreas Heuer and Frank Leymann and Denny Priebe},
   address = {Berlin, Heidelberg, New York},
   publisher = {Springer-Verlag},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   pages = {322--331},
   type = {Konferenz-Beitrag},
   month = {M{\"a}rz},
   year = {2001},
   isbn = {3-540-41707-9},
   language = {Deutsch},
   cr-category = {H.3.4 Information Storage and Retrieval Systems and Software,     H.3.5 Online Information Services},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2001-40/INPROC-2001-40.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {In this paper we present a first concept for a component-based middleware. We use neutral data and description models in order to achieve an abstraction from existing component models. The key elements of our architecture are the component interfaces, the SOAP-based communication protocol, and a corporate repository.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-40&engl=0}
}
@inproceedings {INPROC-2001-39,
   author = {Jochen R{\"u}tschlin and J{\"u}rgen Sellentin and Bernhard Mitschang},
   title = {{Industrieller Einsatz von Application Server Technologie}},
   booktitle = {Informatik 2001: Wirtschaft und Wissenschaft in der Network Economy – Visionen und Wirklichkeit. Tagungsband der GI/OCG-Jahrestagung, 25.-28. September 2001, Universit{\"a}t Wien.},
   editor = {Kurt Bauknecht and Wilfried Brauer and Thomas M{\"u}ck},
   publisher = {{\"O}sterreichische Computer Gesellschaft},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   pages = {916--921},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2001},
   isbn = {3-85403-157-2},
   keywords = {Enterprise Application Integration, EAI, Integrationsarchitektur, Middleware, Application Server, J2EE},
   language = {Deutsch},
   cr-category = {C.2.4 Distributed Systems,     D.2.11 Software Engineering Software Architectures,     D.2.12 Software Engineering Interoperability,     H.4.m Information Systems Applications Miscellaneous},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {In this contribution we use an integration architecture from the EAI context to motivate and show how application server technology can be employed in a meaningful way when bringing systems together in a networked environment. We first present our existing integration architecture and use it to explain some drawbacks of the traditional approach. A section on application servers and the J2EE efforts then leads to a revised proposal for the integration architecture, realized on the basis of this very application server technology.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-39&engl=0}
}
@inproceedings {INPROC-2001-37,
   author = {Carmen Constantinescu and Uwe Heinkel and Ralf Rantzau and Bernhard Mitschang},
   title = {{SIES - An Approach for a Federated Information System in Manufacturing}},
   booktitle = {Proceedings of the International Symposium on Information Systems and Engineering (ISE); Las Vegas, Nevada, USA, June 2001},
   publisher = {CSREA Press},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   pages = {269--275},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2001},
   isbn = {1-892512-85-8},
   keywords = {enterprise application integration; manufacturing; federation; repository; propagation},
   language = {Englisch},
   cr-category = {H.3.4 Information Storage and Retrieval Systems and Software},
   contact = {carmen.constantinescu@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Many problems encountered in providing enterprise-wide information are related to the integration of databases and systems that have been independently developed and also to the management of changes and transformations of data from one database (or system) into another. A major requirement is to accommodate heterogeneity and at the same time to preserve the autonomy of the components. This paper presents our approach to a repository-driven federated system based on a propagation mechanism. The Stuttgart Information and Exploration System (SIES) is characterized by its main components: the Federation Manager, the Propagation Manager, and the Repository System.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-37&engl=0}
}
@inproceedings {INPROC-2001-33,
   author = {Aiko Frank and Bernhard Mitschang},
   title = {{On Sharing of Objects in Concurrent Design}},
   booktitle = {Proceedings of the 6th International Conference on CSCW in Design (CSCWID), London, ON, Canada, July, 2001},
   editor = {Weiming Shen and Zongkai Lin and Jean-Paul Barth{\`e}s and Mohamed Kamel},
   address = {Ottawa, Canada},
   publisher = {NRC Research Press},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   pages = {71--76},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2001},
   isbn = {0-660-18493-1},
   keywords = {Designflow; CSCW; data sharing; agent protocols},
   language = {Englisch},
   cr-category = {H.4.1 Office Automation,     H.5.3 Group and Organization Interfaces,     J.6 Computer-Aided Engineering},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2001-33/INPROC-2001-33.pdf},
   contact = {frankao@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Sharing data is an important aspect in distributed design environments and should be supported by an underlying system. Any synchronous access to data is conflict prone. Applying concurrency control and two-phase commit is an option to be considered. But design processes also demand cooperation between the designers. Negotiation about actions on the product under design and the early exchange of preliminary results are crucial issues. Controlled data access by itself does not fulfil all the needs for cooperation. We will present a new approach that relies on a concept and system model which integrates concurrent activities by a common information space offering flexible protocols for cooperation on the shared objects. We will describe the customizability of the protocols to allow the approach to be adapted to different cooperative scenarios.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-33&engl=0}
}
@inproceedings {INPROC-2001-32,
   author = {Holger Schwarz and Ralf Wagner and Bernhard Mitschang},
   title = {{Improving the Processing of Decision Support Queries: The Case for a DSS Optimizer}},
   booktitle = {Proc. of the 2001 International Database Engineering \& Applications Symposium (IDEAS), July 16-18, 2001},
   editor = {Michel Adiba and Christine Collet and Bipin C. Desai},
   address = {Los Alamitos, Washington, Brussels, Tokyo},
   publisher = {IEEE Computer Society},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   pages = {177--186},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2001},
   isbn = {0-7695-1140-6},
   keywords = {Decision Support; OLAP; Data Warehouse},
   language = {Englisch},
   cr-category = {H.4.2 Information Systems Applications Types of Systems},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2001-32/INPROC-2001-32.pdf},
   contact = {holger.schwarz@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Many decision support applications are built upon data mining and OLAP tools and allow users to answer information requests based on a data warehouse that is managed by a powerful DBMS. In this paper, we focus on tools that generate sequences of SQL statements in order to produce the requested information. Our thorough analysis revealed that many sequences of queries that are generated by commercial tools are not very efficient. An optimized system architecture is suggested for these applications. The main component is a DSS optimizer that accepts previously generated sequences of queries and remodels them according to a set of optimization strategies, before they are executed by the underlying database system. The advantages of this extended architecture are discussed and a couple of appropriate optimization strategies are identified. Experimental results are given, showing that these strategies are appropriate to optimize typical query sequences of an OLAP application.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-32&engl=0}
}
@inproceedings {INPROC-2000-01,
   author = {Clara Nippl and Ralf Rantzau and Bernhard Mitschang},
   title = {{StreamJoin: A Generic Database Approach to Support the Class of Stream-Oriented Applications}},
   booktitle = {Proceedings of the International Database Engineering \& Applications Symposium (IDEAS); Yokohama, Japan, September 2000},
   publisher = {IEEE Computer Society},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   pages = {83--91},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2000},
   isbn = {0-7695-0789-1},
   keywords = {database applications; data mining; database extensions; database operators},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems,     H.2.8 Database Applications},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2000-01/INPROC-2000-01.ps},
   contact = {rrantzau@acm.org},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Today many applications routinely generate large quantities of data. The data often takes the form of (time) series, or more generally streams, i.e. an ordered sequence of records. Analysis of this data requires stream processing techniques which differ in significant ways from what current database analysis and query techniques have been optimized for. In this paper we present a new operator, called StreamJoin, that can efficiently be used to solve stream-related problems of various applications, such as universal quantification, pattern recognition and data mining. Contrary to other approaches, StreamJoin processing provides rapid response times, a non-blocking execution as well as economical resource utilization. Adaptability to different application scenarios is realized by means of parameters. In addition, the StreamJoin operator can be efficiently embedded into the database engine, thus implicitly using the optimization and parallelization capabilities for the benefit of the application. The paper focuses on the applicability of StreamJoin to integrate application semantics into the DBMS.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2000-01&engl=0}
}
@inproceedings {INPROC-1999-30,
   author = {Hermann Ludwig M{\"o}ller and Marcello Mariucci and Bernhard Mitschang},
   title = {{Architecture Considerations for Advanced Earth Observation Application Systems}},
   booktitle = {Proceedings of the Second International Conference on Interoperating Geographic Information System: Interop '99; Zurich, Switzerland, March 10-12, 1999},
   editor = {Andrej V{\`e}kovski and Kurt E. Brassel and Hans-J{\"o}rg Schek},
   address = {Berlin, Heidelberg, New York, Barcelona, Hong Kong, London, Milan, Paris, Singapore},
   publisher = {Springer-Verlag},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   series = {Lecture Notes in Computer Science},
   volume = {1580},
   pages = {75--90},
   type = {Konferenz-Beitrag},
   month = {M{\"a}rz},
   year = {1999},
   isbn = {3-540-65725-8},
   keywords = {Distributed Information Systems; Earth Observation Systems; Applications; Interoperability; Middleware; CORBA},
   language = {Englisch},
   cr-category = {H.2 Database Management,     H.4 Information Systems Applications,     J.2 Physical Sciences and Engineering,     E.1 Data Structures},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-1999-30/INPROC-1999-30.pdf,     ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-1999-30/INPROC-1999-30.ps},
   contact = {Please send an email to mariucci@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Application systems in the earth observation area can be characterised as distributed, platform-inhomogeneous, complex, and cost-intensive information systems. In order to manage the complexity and performance requirements set by these application scenarios, a number of architectural considerations have to be applied. Among others, the most important ones are modularization towards a component architecture and interoperation within this component model. As will be described in this paper, both are mandatory for achieving a high degree of reusability and extensibility at the component level as well as for supporting the necessary scalability properties. In our paper we refer to the state of the art in earth observation application systems as well as to a prototype system that reflects to a high degree the above-mentioned system characteristics.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-1999-30&engl=0}
}
@inproceedings {INPROC-1999-27,
   author = {J{\"u}rgen Sellentin and Aiko Frank and Bernhard Mitschang},
   title = {{TOGA -- A Customizable Service for Data-Centric Collaboration}},
   booktitle = {Proceedings of the 11th Conference on Advanced Information Systems Engineering (CAiSE*99)},
   editor = {Matthias Jarke and Andreas Oberweis},
   publisher = {Springer},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   series = {Lecture Notes in Computer Science},
   volume = {1626},
   pages = {301--316},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {1999},
   isbn = {3-540-66157-3},
   language = {Englisch},
   cr-category = {H.2 Database Management},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {TOGA -- A Customizable Service for Data-Centric Collaboration.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-1999-27&engl=0}
}
@inproceedings {INPROC-1999-25,
   author = {Stefan Sarstedt and G{\"u}nter Sauter and J{\"u}rgen Sellentin and Bernhard Mitschang},
   title = {{Integrationskonzepte f{\"u}r heterogene Anwendungssysteme bei DaimlerChrysler auf Basis internationaler Standards}},
   booktitle = {Datenbanksysteme in B{\"u}ro, Technik und Wissenschaft, GI-Fachtagung BTW 99, Freiburg im Breisgau, 1.-3. M{\"a}rz 1999},
   editor = {A. Buchmann},
   address = {Berlin, Heidelberg, New York},
   publisher = {Springer},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   series = {Informatik aktuell},
   pages = {317--327},
   type = {Konferenz-Beitrag},
   month = {M{\"a}rz},
   year = {1999},
   isbn = {3-540-65606-5},
   keywords = {Funktionsintegration; API-Integration; Heterogenit{\"a}t; STEP; CORBA},
   language = {Deutsch},
   cr-category = {H.2.5 Heterogeneous Databases},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-1999-25/INPROC-1999-25.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Based on the requirements of DaimlerChrysler and using the standards STEP and CORBA, an architecture and methodology for the integration of data and functions of heterogeneous application systems is developed. The system concepts introduced, as well as the optimization of the development process that can be expected from them, are discussed using the passenger car development domain as an example.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-1999-25&engl=0}
}
@inproceedings {INPROC-1999-24,
   author = {Michael Jaedicke and Bernhard Mitschang},
   title = {{User-Defined Table Operators: Enhancing Extensibility for ORDBMS}},
   booktitle = {VLDB'99, Proceedings of 25th International Conference on Very Large Data Bases, Edinburgh, Scotland, UK, September 7-10, 1999},
   publisher = {Morgan Kaufmann},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   pages = {494--505},
   type = {Konferenz-Beitrag},
   month = {Mai},
   year = {1999},
   isbn = {1-55860-615-7},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   ee = {http://www3.informatik.tu-muenchen.de/public/projekte/sfb342/publications/99.SFB-Bericht.Multiop.ps.gz},
   contact = {Bernhard Mitschang mitsch@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Currently parallel object-relational database technology is setting the direction for the future of data management. A central enhancement of object-relational database technology is the possibility to execute arbitrary user-defined functions within SQL statements. We show the limits of this approach and propose user-defined table operators as a new concept that allows the definition and implementation of arbitrary user-defined N-ary database operators, which can be programmed using SQL or Embedded SQL (with some extensions). Our approach leads to a new dimension of extensibility that allows more application code to be pushed into the server with full support for efficient execution and parallel processing. Furthermore, it allows performance enhancements of orders of magnitude for the evaluation of many queries with complex user-defined functions, as we show for two concrete examples. Finally, our implementation concept guarantees that this approach fits well into the architectures of commercial object-relational database management systems.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-1999-24&engl=0}
}
@inproceedings {INPROC-1998-20,
   author = {J{\"u}rgen Sellentin and Bernhard Mitschang},
   title = {{Data Intensive Intra- \& Internet Applications - An Example Using Java and CORBA in the World Wide Web}},
   booktitle = {Proceedings of the Fourteenth International Conference on Data Engineering, February 23-27, 1998, Orlando, Florida, USA},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   pages = {302--311},
   type = {Konferenz-Beitrag},
   month = {Februar},
   year = {1998},
   isbn = {0-8186-8289-2},
   language = {Englisch},
   cr-category = {H.4 Information Systems Applications,     H.2.4 Database Management Systems,     H.2.5 Heterogeneous Databases},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Intra/Internet technology has become a key issue in the development of modern systems. Nowadays it is not sufficient anymore to present static information sheets through the WWW, instead we need interactive applications that may even compute complex results or process large data sets. In this paper we describe a prototype based on Java and CORBA. Both represent modern concepts that have been developed to fulfill these requirements. Their combination results into the kind of data processing we want to apply to the WWW: First, portable, powerful, structured and even reusable client programs instead of cryptic HTML scripts, second, well defined interfaces, and third, efficient server processes separated from the WWW server and its CGI extensions. Communication is controlled by a fault tolerant CORBA layer, which also enables server development using a different language than Java. Besides a discussion of CORBA and its data shipping capabilities, we take a closer look at Java and its runtime behavior, and we report on the experiences gathered with our prototype system and its testbed application. This system has also been used to gather experiences with and to influence the new language binding of the Standard Data Access Interface (SDAI) of the Standard for the Exchange of Product Data (STEP, ISO 10303) to Java.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-1998-20&engl=0}
}
@inproceedings {INPROC-1998-19,
   author = {Clara Nippl and Bernhard Mitschang},
   title = {{TOPAZ: a Cost-Based, Rule-Driven, Multi-Phase Parallelizer}},
   booktitle = {VLDB'98, Proceedings of 24th International Conference on Very Large Data Bases, New York City, New York, USA, August 24-27, 1998},
   publisher = {Morgan Kaufmann},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   pages = {251--262},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {1998},
   isbn = {1-55860-566-5},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   contact = {Bernhard Mitschang mitsch@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Currently the key problems of query optimization are extensibility, imposed by object-relational technology, as well as query complexity, caused by forthcoming applications such as OLAP. We propose a generic approach to parallelization, called TOPAZ. Different forms of parallelism are exploited to obtain maximum speedup combined with lowest resource consumption. The necessary abstractions w.r.t. operator characteristics and system architecture are provided by rules that are used by a cost-based, top-down search engine. A multi-phase pruning based on a global analysis of the plan efficiently guides the search process, thus considerably reducing complexity and achieving optimization performance. Since TOPAZ solely relies on the widespread concepts of iterators and data rivers common to (parallel) execution models, it fits as an enabling technology into most state-of-the-art (object-) relational systems.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-1998-19&engl=0}
}
@inproceedings {INPROC-1998-18,
   author = {Michael Jaedicke and Bernhard Mitschang},
   title = {{On Parallel Processing of Aggregate and Scalar Functions in Object-Relational DBMS}},
   booktitle = {Proceedings ACM SIGMOD International Conference on Management of Data, Seattle, Washington, USA, June 2-4, 1998},
   publisher = {ACM Press},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Germany},
   pages = {379--389},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {1998},
   isbn = {0-89791-995-5},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   contact = {Bernhard Mitschang mitsch@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Nowadays parallel object-relational DBMS are envisioned as the next great wave, but there is still a lack of efficient implementation concepts for some parts of the proposed functionality. Thus one of the current goals for parallel object-relational DBMS is to move towards higher performance. In this paper we develop a framework that allows user-defined functions to be processed with data parallelism. We describe the class of partitionable functions that can be processed in parallel. We also propose an extension which allows the processing of another large class of functions to be sped up by means of parallel sorting. Functions that can be processed by means of our techniques are often used in decision support queries on large data volumes, for example. Hence a parallel execution is indispensable.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-1998-18&engl=0}
}
@article {ART-2024-01,
   author = {Jan Schneider and Christoph Gr{\"o}ger and Arnold Lutsch and Holger Schwarz and Bernhard Mitschang},
   title = {{The Lakehouse: State of the Art on Concepts and Technologies}},
   journal = {SN Computer Science},
   publisher = {Springer Nature},
   volume = {5},
   number = {5},
   pages = {1--39},
   type = {Artikel in Zeitschrift},
   month = {April},
   year = {2024},
   issn = {2661-8907},
   doi = {10.1007/s42979-024-02737-0},
   keywords = {Data Lakehouse; Data Lake; Data Platform; Data Analytics},
   language = {Englisch},
   cr-category = {H.3.4 Information Storage and Retrieval Systems and Software},
   ee = {https://doi.org/10.1007/s42979-024-02737-0,     https://link.springer.com/content/pdf/10.1007/s42979-024-02737-0.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {In the context of data analytics, so-called lakehouses refer to novel variants of data platforms that attempt to combine characteristics of data warehouses and data lakes. In this way, lakehouses promise to simplify enterprise analytics architectures, which often suffer from high operational costs, slow analytical processes and further shortcomings resulting from data replication. However, different views and notions on the lakehouse paradigm exist, which are commonly driven by individual technologies and varying analytical use cases. Therefore, it remains unclear what challenges lakehouses address, how they can be characterized and which technologies can be leveraged to implement them. This paper addresses these issues by providing an extensive overview of concepts and technologies that are related to the lakehouse paradigm and by outlining lakehouses as a distinct architectural approach for data platforms. Concepts and technologies from literature with regard to lakehouses are discussed, based on which a conceptual foundation for lakehouses is established. In addition, several popular technologies are evaluated regarding their suitability for the building of lakehouses. All findings are supported and demonstrated with the help of a representative analytics scenario. Typical challenges of conventional data platforms are identified, a new, sharper definition for lakehouses is proposed and technical requirements for lakehouses are derived. As part of an evaluation, these requirements are applied to several popular technologies, of which frameworks for data lakes turn out to be particularly helpful for the construction of lakehouses. Our work provides an overview of the state of the art and a conceptual foundation for the lakehouse paradigm, which can support future research.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2024-01&engl=0}
}
@article {ART-2023-07,
   author = {Rebecca Eichler and Christoph Gr{\"o}ger and Eva Hoos and Christoph Stach and Holger Schwarz and Bernhard Mitschang},
   title = {{Introducing the enterprise data marketplace: a platform for democratizing company data}},
   journal = {Journal of Big Data},
   publisher = {Springer Nature},
   volume = {10},
   pages = {1--38},
   type = {Artikel in Zeitschrift},
   month = {November},
   year = {2023},
   issn = {2196-1115},
   doi = {10.1186/s40537-023-00843-z},
   keywords = {Data Catalog; Data Democratization; Data Market; Data Sharing; Enterprise Data Marketplace; Metadata Management},
   language = {Englisch},
   cr-category = {E.m Data Miscellaneous,     H.3.7 Digital Libraries,     H.4.m Information Systems Applications Miscellaneous},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {In this big data era, multitudes of data are generated and collected which contain the potential to gain new insights, e.g., for enhancing business models. To leverage this potential through, e.g., data science and analytics projects, the data must be made available. In this context, data marketplaces are used as platforms to facilitate the exchange and thus, the provisioning of data and data-related services. Data marketplaces are mainly studied for the exchange of data between organizations, i.e., as external data marketplaces. Yet, the data collected within a company also have the potential to provide valuable insights for this same company, for instance to optimize business processes. Studies indicate, however, that a significant amount of data within companies remains unused. In this sense, it is proposed to employ an Enterprise Data Marketplace, a platform to democratize data within a company among its employees. Specifics of the Enterprise Data Marketplace, how it can be implemented or how it makes data available throughout a variety of systems like data lakes have not been investigated in the literature so far. Therefore, we present the characteristics and requirements of this kind of marketplace. We also distinguish it from other tools like data catalogs, provide a platform architecture and highlight how it integrates with the company's system landscape. The presented concepts are demonstrated through an Enterprise Data Marketplace prototype and an experiment reveals that this marketplace significantly improves the data consumer workflows in terms of efficiency and complexity. This paper is based on several interdisciplinary works combining comprehensive research with practical experience from an industrial perspective. We therefore present the Enterprise Data Marketplace as a distinct marketplace type and provide the basis for establishing it within a company.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2023-07&engl=0}
}
@article {ART-2023-04,
   author = {Alejandro Gabriel Villanueva Zacarias and Peter Reimann and Christian Weber and Bernhard Mitschang},
   title = {{AssistML: An Approach to Manage, Recommend and Reuse ML Solutions}},
   journal = {International Journal of Data Science and Analytics (JDSA)},
   publisher = {Springer Nature},
   type = {Artikel in Zeitschrift},
   month = {Juli},
   year = {2023},
   keywords = {Meta-learning; Machine learning; AutoML; Metadata; Recommender systems},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The adoption of machine learning (ML) in organizations is characterized by the use of multiple ML software components. When building ML systems out of these software components, citizen data scientists face practical requirements which go beyond the known challenges of ML, e.g., data engineering or parameter optimization. They are expected to quickly identify ML system options that strike a suitable trade-off across multiple performance criteria. These options also need to be understandable for non-technical users. Addressing these practical requirements represents a problem for citizen data scientists with limited ML experience. This calls for a concept to help them identify suitable ML software combinations. Related approaches, e.g., AutoML systems, are not responsive enough or cannot balance different performance criteria. This paper explains how AssistML, a novel concept to recommend ML solutions, i.e., software systems with ML models, can be used as an alternative for predictive use cases. Our concept collects and preprocesses metadata of existing ML solutions to quickly identify the ML solutions that can be reused in a new use case. We implement AssistML and evaluate it with two exemplary use cases. Results show that AssistML can recommend ML solutions in line with users' performance preferences in seconds. Compared to AutoML, AssistML offers citizen data scientists simpler, intuitively explained ML solutions in considerably less time. Moreover, these solutions perform similarly or even better than AutoML models.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2023-04&engl=0}
}
@article {ART-2023-03,
   author = {Dennis Treder-Tschechlov and Manuel Fritz and Holger Schwarz and Bernhard Mitschang},
   title = {{ML2DAC: Meta-Learning to Democratize AutoML for Clustering Analysis}},
   journal = {Proceedings of the ACM on Management of Data (SIGMOD)},
   publisher = {Association for Computing Machinery (ACM)},
   volume = {1},
   number = {2},
   pages = {1--26},
   type = {Artikel in Zeitschrift},
   month = {Juni},
   year = {2023},
   doi = {10.1145/3589289},
   language = {Englisch},
   cr-category = {I.5.3 Pattern Recognition Clustering},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Analysts often struggle with the combined algorithm selection and hyperparameter optimization problem, a.k.a. CASH problem in literature. Typically, they execute several algorithms with varying hyperparameter settings to find configurations that show valuable results. Efficiently finding these configurations is a major challenge. In clustering analyses, analysts face the additional challenge to select a cluster validity index that allows them to evaluate clustering results in a purely unsupervised fashion. Many different cluster validity indices exist and each one has its benefits depending on the dataset characteristics. While experienced analysts might address these challenges using their domain knowledge and experience, especially novice analysts struggle with them. In this paper, we propose a new meta-learning approach to address these challenges. Our approach uses knowledge from past clustering evaluations to apply strategies that experienced analysts would exploit. In particular, we use meta-learning to (a) select a suitable clustering validity index, (b) efficiently select well-performing clustering algorithm and hyperparameter configurations, and (c) reduce the search space to suitable clustering algorithms. In the evaluation, we show that our approach significantly outperforms state-of-the-art approaches regarding accuracy and runtime.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2023-03&engl=0}
}
@article {ART-2023-02,
   author = {Vitali Hirsch and Peter Reimann and Dennis Treder-Tschechlov and Holger Schwarz and Bernhard Mitschang},
   title = {{Exploiting Domain Knowledge to address Class Imbalance and a Heterogeneous Feature Space in Multi-Class Classification}},
   journal = {International Journal on Very Large Data Bases (VLDB-Journal)},
   publisher = {Springer},
   type = {Artikel in Zeitschrift},
   month = {Februar},
   year = {2023},
   keywords = {Classification; Domain knowledge; Multi-class Imbalance; Heterogeneous feature space},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Real-world data of multi-class classification tasks often show complex data characteristics that lead to a reduced classification performance. Major analytical challenges are a high degree of multi-class imbalance within data and a heterogeneous feature space, which increases the number and complexity of class patterns. Existing solutions to classification or data pre-processing only address one of these two challenges in isolation. We propose a novel classification approach that explicitly addresses both challenges of multi-class imbalance and heterogeneous feature space together. As main contribution, this approach exploits domain knowledge in terms of a taxonomy to systematically prepare the training data. Based on an experimental evaluation on both real-world data and several synthetically generated data sets, we show that our approach outperforms any other classification technique in terms of accuracy. Furthermore, it entails considerable practical benefits in real-world use cases, e.g., it reduces rework required in the area of product quality control.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2023-02&engl=0}
}
@article {ART-2022-08,
   author = {Christoph Stach and Cl{\'e}mentine Gritti and Dennis Przytarski and Bernhard Mitschang},
   title = {{Assessment and Treatment of Privacy Issues in Blockchain Systems}},
   journal = {ACM SIGAPP Applied Computing Review},
   publisher = {ACM},
   volume = {22},
   number = {3},
   pages = {5--24},
   type = {Artikel in Zeitschrift},
   month = {September},
   year = {2022},
   issn = {1559-6915},
   keywords = {blockchain; decentralized; immutable; tamper-proof; GDPR; privacy assessment; data purging; data authentication; permission control; privacy filters; privacy control environment},
   language = {Englisch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues,     K.6.5 Security and Protection},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de.},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Owing to the ability to capture and quantify any aspect of daily life via sensors, enabled by the Internet of Things (IoT), data have become one of the most important resources of the 21st century. However, the high value of data also renders data an appealing target for criminals. Two key protection goals when dealing with data are therefore to maintain their permanent availability and to ensure their integrity. Blockchain technology provides a means of data protection that addresses both of these objectives. On that account, blockchains are becoming increasingly popular for the management of critical data. As blockchains are operated in a decentralized manner, they are not only protected against failures, but it is also ensured that neither party has sole control over the managed data. Furthermore, blockchains are immutable and tamper-proof data stores, whereby data integrity is guaranteed. While these properties are preferable from a data security perspective, they also pose a threat to privacy and confidentiality, as data cannot be concealed, rectified, or deleted once they are added to the blockchain. In this paper, we therefore investigate which features of the blockchain pose an inherent privacy threat when dealing with personal or confidential data. To this end, we consider to what extent blockchains are in compliance with applicable data protection laws, namely the European General Data Protection Regulation (GDPR). Based on our identified key issues, we assess which concepts and technical measures can be leveraged to address these issues in order to create a privacy-by-design blockchain system.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2022-08&engl=0}
}
@article {ART-2022-07,
   author = {Christoph Stach and Cl{\'e}mentine Gritti and Julia Br{\"a}cker and Michael Behringer and Bernhard Mitschang},
   title = {{Protecting Sensitive Data in the Information Age: State of the Art and Future Prospects}},
   journal = {Future Internet},
   publisher = {MDPI},
   volume = {14},
   number = {11},
   pages = {1--42},
   type = {Artikel in Zeitschrift},
   month = {Oktober},
   year = {2022},
   issn = {1999-5903},
   doi = {10.3390/fi14110302},
   keywords = {smart service; privacy techniques; location-based services; health services; voice-controlled digital assistants; image analysis; food analysis; recommender systems; DNA sequence classification},
   language = {Englisch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues,     K.6.5 Security and Protection},
   ee = {https://www.mdpi.com/1999-5903/14/11/302/htm},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de.},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The present information age is characterized by an ever-increasing digitalization. Smart devices quantify our entire lives. These collected data provide the foundation for data-driven services called smart services. They are able to adapt to a given context and thus tailor their functionalities to the user's needs. It is therefore not surprising that their main resource, namely data, is nowadays a valuable commodity that can also be traded. However, this trend does not only have positive sides, as the gathered data reveal a lot of information about various data subjects. To prevent uncontrolled insights into private or confidential matters, data protection laws restrict the processing of sensitive data. One key factor in this regard is user-friendly privacy mechanisms. In this paper, we therefore assess current state-of-the-art privacy mechanisms. To this end, we initially identify forms of data processing applied by smart services. We then discuss privacy mechanisms suited for these use cases. Our findings reveal that current state-of-the-art privacy mechanisms provide good protection in principle, but there is no compelling one-size-fits-all privacy approach. This leads to further questions regarding the practicality of these mechanisms, which we present in the form of seven thought-provoking propositions.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2022-07&engl=0}
}
@article {ART-2022-06,
   author = {Christoph Stach and Michael Behringer and Julia Br{\"a}cker and Cl{\'e}mentine Gritti and Bernhard Mitschang},
   title = {{SMARTEN — A Sample-Based Approach towards Privacy-Friendly Data Refinement}},
   journal = {Journal of Cybersecurity and Privacy},
   publisher = {MDPI},
   volume = {2},
   number = {3},
   pages = {606--628},
   type = {Artikel in Zeitschrift},
   month = {August},
   year = {2022},
   issn = {2624-800X},
   doi = {10.3390/jcp2030031},
   keywords = {privacy; data refinement; data cleansing; data transformation; human-in-the-loop},
   language = {Englisch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues,     K.6.5 Security and Protection},
   ee = {https://www.mdpi.com/2624-800X/2/3/31/htm},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de.},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Two factors are crucial for the effective operation of modern-day smart services: Initially, IoT-enabled technologies have to capture and combine huge amounts of data on data subjects. Then, all these data have to be processed exhaustively by means of techniques from the area of big data analytics. With regard to the latter, thorough data refinement in terms of data cleansing and data transformation is the decisive cornerstone. Studies show that data refinement reaches its full potential only by involving domain experts in the process. However, this means that these experts need full insight into the data in order to be able to identify and resolve any issues therein, e.g., by correcting or removing inaccurate, incorrect, or irrelevant data records. In particular for sensitive data (e.g., private data or confidential data), this poses a problem, since these data are thereby disclosed to third parties such as domain experts. To this end, we introduce SMARTEN, a sample-based approach towards privacy-friendly data refinement to smarten up big data analytics and smart services. SMARTEN applies a revised data refinement process that fully involves domain experts in data pre-processing but does not expose any sensitive data to them or any other third-party. To achieve this, domain experts obtain a representative sample of the entire data set that meets all privacy policies and confidentiality guidelines. Based on this sample, domain experts define data cleaning and transformation steps. Subsequently, these steps are converted into executable data refinement rules and applied to the entire data set. Domain experts can request further samples and define further rules until the data quality required for the intended use case is reached. Evaluation results confirm that our approach is effective in terms of both data quality and data privacy.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2022-06&engl=0}
}
@article {ART-2022-01,
   author = {Christoph Stach and Julia Br{\"a}cker and Rebecca Eichler and Corinna Giebler and Bernhard Mitschang},
   title = {{Simplified Specification of Data Requirements for Demand-Actuated Big Data Refinement}},
   journal = {Journal of Data Intelligence},
   publisher = {Rinton Press},
   volume = {3},
   number = {3},
   pages = {366--400},
   type = {Artikel in Zeitschrift},
   month = {August},
   year = {2022},
   issn = {2577-610X},
   keywords = {data pre-processing; data transformation; knowledge modeling; ontology; data management; Data Lakes; zone model; food analysis},
   language = {Englisch},
   cr-category = {H.2.7 Database Administration,     E.2 Data Storage Representations,     H.3.3 Information Search and Retrieval,     H.2.8 Database Applications},
   contact = {Senden Sie eine E-Mail an christoph.stach@ipvs.uni-stuttgart.de.},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Data have become one of the most valuable resources in modern society. Due to increasing digitalization and the growing prevalence of the Internet of Things, it is possible to capture data on any aspect of today's life. Similar to physical resources, data have to be refined before they can become a profitable asset. However, such data preparation entails completely novel challenges: For instance, data are not consumed when being processed, whereby the volume of available data that needs to be managed increases steadily. Furthermore, the data preparation has to be tailored to the intended use case in order to achieve an optimal outcome. This, however, requires the knowledge of domain experts. Since such experts are typically not IT experts, they need tools that enable them to specify the data requirements of their use cases in a user-friendly manner. The goal of this data preparation is to provide any emerging use case with demand-actuated data. With this in mind, we designed a tailorable data preparation zone for Data Lakes called BARENTS. It provides a simplified method for domain experts to specify how data must be pre-processed for their use cases, and these data preparation steps are then applied automatically. The data requirements are specified by means of an ontology-based method which is comprehensible to non-IT experts. Data preparation and provisioning are realized in a resource-efficient manner by implementing BARENTS as a dedicated zone for Data Lakes. This way, BARENTS is seamlessly embeddable into established Big Data infrastructures. This article is an extended and revised version of the conference paper ``Demand-Driven Data Provisioning in Data Lakes: BARENTS - A Tailorable Data Preparation Zone'' by Stach et al. In comparison to our original conference paper, we take a more detailed look at related work in the paper at hand. The emphasis of this extended and revised version, however, is on strategies to improve the performance of BARENTS and enhance its functionality. To this end, we discuss in-depth implementation details of our prototype and introduce a novel recommender system in BARENTS that assists users in specifying data preparation steps.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2022-01&engl=0}
}
@article {ART-2021-04,
   author = {Dennis Przytarski and Christoph Stach and Cl{\'e}mentine Gritti and Bernhard Mitschang},
   title = {{Query Processing in Blockchain Systems: Current State and Future Challenges}},
   journal = {Future Internet},
   editor = {Dino Giuli and Andrew Hudson-Smith and Luis Javier Garcia Villalba},
   publisher = {MDPI},
   volume = {14},
   number = {1},
   pages = {1--31},
   type = {Artikel in Zeitschrift},
   month = {Dezember},
   year = {2021},
   issn = {1999-5903},
   doi = {10.3390/fi14010001},
   keywords = {blockchain systems; query processing; data models; data structures; block structures},
   language = {Englisch},
   cr-category = {H.3.0 Information Storage and Retrieval General,     H.3.3 Information Search and Retrieval},
   contact = {Senden Sie eine E-Mail an Dennis.Przytarski@ipvs.uni-stuttgart.de.},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {When, in 2008, Satoshi Nakamoto envisioned the first distributed database management system that relied on a cryptographically secured chain of blocks to store data in an immutable and tamper-resistant manner, his primary use case was the introduction of a digital currency. Owing to this use case, the blockchain system was geared towards efficient storage of data, whereas the processing of complex queries, such as provenance analyses of data history, was out of focus. The increasing use of Internet of Things technologies and the resulting digitization in many domains, however, have led to a plethora of novel use cases for a secure digital ledger. For instance, in the healthcare sector, blockchain systems are used for the secure storage and sharing of electronic health records, while the food industry applies such systems to enable a reliable food-chain traceability, e.g., to prove compliance with cold chains. In these application domains, however, querying the current state is not sufficient - comprehensive history queries are required instead. Due to these altered usage modes involving more complex query types, it is questionable whether today's blockchain systems are prepared for this type of usage and whether such queries can be processed efficiently by them. In our paper, we therefore investigate novel use cases for blockchain systems and elicit their requirements towards a data store in terms of query capabilities. We reflect the state of the art in terms of query support in blockchain systems and assess whether it is capable of meeting the requirements of such more sophisticated use cases. As a result, we identify future research challenges with regard to query processing in blockchain systems.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2021-04&engl=0}
}
@article {ART-2021-03,
   author = {Rebecca Eichler and Corinna Giebler and Christoph Gr{\"o}ger and Holger Schwarz and Bernhard Mitschang},
   title = {{Modeling metadata in data lakes—A generic model}},
   journal = {Data \& Knowledge Engineering},
   publisher = {Elsevier},
   volume = {136},
   pages = {1--17},
   type = {Artikel in Zeitschrift},
   month = {November},
   year = {2021},
   issn = {0169-023X},
   doi = {10.1016/j.datak.2021.101931},
   keywords = {Metadata management; Metadata model; Data lake; Data management; Data lake zones; Metadata classification},
   language = {Englisch},
   cr-category = {H.2 Database Management},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Data contains important knowledge and has the potential to provide new insights. Due to new technological developments such as the Internet of Things, data is generated in increasing volumes. In order to deal with these data volumes and extract the data's value, new concepts such as the data lake were created. The data lake is a data management platform designed to handle data at scale for analytical purposes. To prevent a data lake from becoming inoperable and turning into a data swamp, metadata management is needed. To store and handle metadata, a generic metadata model is required that can reflect metadata of any potential metadata management use case, e.g., data versioning or data lineage. However, an evaluation of existing metadata models shows that none of them is sufficiently generic, as their design basis is not suited to this purpose. In this work, we use a different design approach to build HANDLE, a generic metadata model for data lakes. The new metadata model supports the acquisition of metadata on varying granular levels, any metadata categorization, and the acquisition of both metadata that belongs to a specific data element and metadata that applies to a broader range of data. HANDLE supports the flexible integration of metadata and can reflect the same metadata in various ways according to the intended utilization. Furthermore, it is created for data lakes and therefore also supports data lake characteristics like data lake zones. With these capabilities HANDLE enables comprehensive metadata management in data lakes. HANDLE's feasibility is shown through the application to an exemplary access use case and a prototypical implementation. By comparing HANDLE with existing models we demonstrate that it can provide the same information as the other models as well as adding further capabilities needed for metadata management in data lakes.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2021-03&engl=0}
}
@article {ART-2020-20,
   author = {Corinna Giebler and Christoph Gr{\"o}ger and Eva Hoos and Rebecca Eichler and Holger Schwarz and Bernhard Mitschang},
   title = {{Data Lakes auf den Grund gegangen - Herausforderungen und Forschungsl{\"u}cken in der Industriepraxis}},
   journal = {Datenbank Spektrum},
   publisher = {Springer},
   volume = {20},
   pages = {57--69},
   type = {Artikel in Zeitschrift},
   month = {Januar},
   year = {2020},
   keywords = {Data Lakes; Analytics; Stand der Technik; Herausforderungen; Praxisbeispiel},
   language = {Deutsch},
   cr-category = {H.4 Information Systems Applications},
   contact = {Senden Sie eine E-Mail an Corinna.Giebler@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Companies increasingly face the challenge of managing large, heterogeneous data and extracting the value contained in them. In recent years, the data lake has therefore emerged as a novel concept for managing and exploiting such complex data. However, when companies want to put such a data lake into practice, they encounter a variety of challenges, such as contradictions in its definition or vague and missing concepts. In this article, concrete projects of a globally operating industrial company are used to identify existing challenges and to derive requirements for data lakes. These requirements are compared with the available literature on data lakes as well as with existing approaches from research. This comparison shows that five major research gaps remain: 1. unclear data modeling methods, 2. a missing data lake reference architecture, 3. an incomplete metadata management concept, 4. an incomplete data lake governance concept, 5. a missing holistic implementation strategy.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2020-20&engl=0}
}
@article {ART-2020-04,
   author = {Corinna Giebler and Christoph Gr{\"o}ger and Eva Hoos and Rebecca Eichler and Holger Schwarz and Bernhard Mitschang},
   title = {{Data Lakes auf den Grund gegangen: Herausforderungen und Forschungsl{\"u}cken in der Industriepraxis}},
   journal = {Datenbank-Spektrum},
   publisher = {Springer},
   volume = {20},
   number = {1},
   pages = {57--69},
   type = {Artikel in Zeitschrift},
   month = {Januar},
   year = {2020},
   doi = {10.1007/s13222-020-00332-0},
   keywords = {Data Lake; Analytics; Stand der Technik; Herausforderungen; Praxisbeispiel},
   language = {Deutsch},
   cr-category = {A.1 General Literature, Introductory and Survey,     E.0 Data General},
   ee = {https://rdcu.be/b0WM8},
   contact = {Senden Sie eine E-Mail an Corinna.Giebler@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Companies increasingly face the challenge of managing large, heterogeneous data and extracting the value contained in them. In recent years, the data lake has therefore emerged as a novel concept for managing and exploiting such complex data. However, when companies want to put such a data lake into practice, they encounter a variety of challenges, such as contradictions in its definition or vague and missing concepts. In this article, concrete projects of a globally operating industrial company are used to identify existing challenges and to derive requirements for data lakes. These requirements are compared with the available literature on data lakes as well as with existing approaches from research. This comparison shows that five major research gaps remain: 1. unclear data modeling methods, 2. a missing data lake reference architecture, 3. an incomplete metadata management concept, 4. an incomplete data lake governance concept, 5. a missing holistic implementation strategy.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2020-04&engl=0}
}
@article {ART-2019-12,
   author = {Mathias Mormul and Pascal Hirmer and Matthias Wieland and Bernhard Mitschang},
   title = {{Distributed Situation Recognition in Industry 4.0}},
   journal = {International Journal On Advances in Intelligent Systems},
   publisher = {IARIA},
   volume = {12},
   number = {1},
   pages = {39--49},
   type = {Artikel in Zeitschrift},
   month = {August},
   year = {2019},
   issn = {1942-2679},
   keywords = {Industry 4.0; Edge Computing; Situation Recognition; Distribution Pattern},
   language = {Englisch},
   cr-category = {E.0 Data General},
   ee = {https://www.iariajournals.org/intelligent_systems/intsys_v12_n12_2019_paged.pdf},
   contact = {mathias.mormul@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {In recent years, advances in the Internet of Things have led to new approaches and applications, for example, in the domains of Smart Factories or Smart Cities. However, with the advantages such applications bring, new challenges arise as well. One of these challenges is the recognition of situations, e.g., machine failures in Smart Factories. Especially in the domain of industrial manufacturing, several requirements have to be met in order to deliver reliable and efficient situation recognition. One of these requirements is distribution, in order to achieve high efficiency. In this article, we present a layered modeling approach to enable distributed situation recognition. These layers include the modeling, the deployment, and the execution of the situation recognition. Furthermore, we provide tool support to decrease the complexity for domain users.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2019-12&engl=0}
}
@article {ART-2019-10,
   author = {Cornelia Kiefer and Peter Reimann and Bernhard Mitschang},
   title = {{QUALM: Ganzheitliche Messung und Verbesserung der Datenqualit{\"a}t in der Textanalyse}},
   journal = {Datenbank-Spektrum},
   publisher = {Springer Verlag},
   pages = {1--12},
   type = {Artikel in Zeitschrift},
   month = {Juni},
   year = {2019},
   doi = {https://doi.org/10.1007/s13222-019-00318-7},
   keywords = {Datenqualit{\"a}t; Textanalyse; Text Mining; Trainingsdaten; Semantische Ressourcen},
   language = {Deutsch},
   cr-category = {H.3 Information Storage and Retrieval},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Existing approaches to measuring and improving the quality of text data in text analytics suffer from three major drawbacks. Evaluation metrics such as accuracy measure quality reliably, but they (1) depend on gold annotations that are costly to create manually and (2) provide no starting points for improving the quality. First domain-specific data quality methods for unstructured text data do work without gold annotations and do offer starting points for improving data quality. However, these methods were developed only for limited application areas and therefore (3) do not take into account the specifics of many analysis tools in text analytics processes. In this work, we present the QUALM concept for high-quality mining of text data (QUALity Mining), which addresses these three drawbacks. The goal of QUALM is to increase the quality of the analysis results, e.g., with respect to the accuracy of a text classification, based on measuring and improving data quality. To this end, QUALM provides a set of QUALM data quality methods. QUALM indicators capture data quality holistically based on the fit between the input data and the specifics of the analysis tools, such as the features, training data, and semantic resources used (for example, dictionaries or taxonomies). Each indicator is accompanied by a matching modifier, with which both the data and the specifics of the analysis tools can be changed in order to increase data quality. In a first evaluation of QUALM, we show for concrete analysis tools and datasets that applying the QUALM data quality methods also goes along with an increase in the quality of the analysis results in terms of the evaluation metric accuracy. To this end, the fit between input data and the specifics of the analysis tools is increased with concrete QUALM modifiers, which, for example, resolve abbreviations or automatically suggest suitable training data based on text similarity metrics.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2019-10&engl=0}
}
@article {ART-2018-07,
   author = {Eva Hoos and Pascal Hirmer and Bernhard Mitschang},
   title = {{Automated Creation and Provisioning of Decision Information Packages for the Smart Factory}},
   journal = {Complex Systems Informatics and Modeling Quarterly},
   publisher = {Online},
   volume = {15},
   pages = {72--89},
   type = {Artikel in Zeitschrift},
   month = {August},
   year = {2018},
   issn = {2255-9922},
   doi = {10.7250/csimq.2018-15.04},
   keywords = {Industry 4.0; Context-awareness; Data Provisioning},
   language = {Englisch},
   cr-category = {H.0 Information Systems General},
   ee = {https://csimq-journals.rtu.lv/article/view/csimq.2018-15.04},
   contact = {Pascal.Hirmer@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2018-07&engl=0}
}
@article {ART-2017-10,
   author = {Ana Cristina Franco da Silva and Pascal Hirmer and Uwe Breitenb{\"u}cher and Oliver Kopp and Bernhard Mitschang},
   title = {{Customization and provisioning of complex event processing using TOSCA}},
   journal = {Computer Science - Research and Development},
   publisher = {Springer Berlin Heidelberg},
   pages = {1--11},
   type = {Artikel in Zeitschrift},
   month = {September},
   year = {2017},
   issn = {1865-2042},
   issn = {1865-2034},
   doi = {10.1007/s00450-017-0386-z},
   keywords = {Internet of Things; Complex event processing; Customization; TOSCA},
   language = {Englisch},
   cr-category = {K.6 Management of Computing and Information Systems,     D.2.12 Software Engineering Interoperability},
   ee = {https://link.springer.com/article/10.1007/s00450-017-0386-z},
   contact = {Ana-Cristina.Franco-da-Silva@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2017-10&engl=0}
}
@article {ART-2017-09,
   author = {Pascal Hirmer and Michael Behringer and Bernhard Mitschang},
   title = {{Partial execution of Mashup Plans during modeling time}},
   journal = {Computer Science - Research and Development},
   publisher = {Springer Berlin Heidelberg},
   pages = {1--12},
   type = {Artikel in Zeitschrift},
   month = {September},
   year = {2017},
   issn = {1865-2034; 1865-2042},
   doi = {10.1007/s00450-017-0388-x},
   keywords = {Workflows; Modeling; BPEL; Partial execution; Data Mashups},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications,     H.3.0 Information Storage and Retrieval General,     E.1 Data Structures},
   ee = {https://link.springer.com/article/10.1007%2Fs00450-017-0388-x},
   contact = {pascal.hirmer@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2017-09&engl=0}
}
@article {ART-2016-23,
   author = {Pascal Hirmer and Uwe Breitenb{\"u}cher and Ana Cristina Franco da Silva and K{\'a}lm{\'a}n K{\'e}pes and Bernhard Mitschang and Matthias Wieland},
   title = {{Automating the Provisioning and Configuration of Devices in the Internet of Things}},
   journal = {Complex Systems Informatics and Modeling Quarterly},
   publisher = {Online},
   volume = {9},
   pages = {28--43},
   type = {Artikel in Zeitschrift},
   month = {Dezember},
   year = {2016},
   doi = {10.7250/csimq.2016-9.02},
   issn = {2255-9922},
   keywords = {Internet of Things; sensors; actuators; digital twin; ontologies; TOSCA},
   language = {Englisch},
   cr-category = {J.6 Computer-Aided Engineering,     H.3.1 Content Analysis and Indexing},
   ee = {https://csimq-journals.rtu.lv/article/view/csimq.2016-9.02/pdf_8},
   contact = {pascal.hirmer@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The Internet of Things benefits from an increasing number of interconnected technical devices. This has led to the existence of so-called smart environments, which encompass one or more devices sensing, acting, and automatically performing different tasks to enable their self-organization. Smart environments are divided into two parts: the physical environment and its digital representation, oftentimes referred to as digital twin. However, the automated binding and monitoring of devices of smart environments are still major issues. In this article we present a method and system architecture to cope with these challenges by enabling (i) easy modeling of sensors, actuators, devices, and their attributes, (ii) dynamic device binding based on their type, (iii) the access to devices using different paradigms, and (iv) the monitoring of smart environments in regard to failures or changes. We furthermore provide a prototypical implementation of the introduced approach.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-23&engl=0}
}
@article {ART-2016-18,
   author = {Frank Steimle and Matthias Wieland and Bernhard Mitschang and Sebastian Wagner and Frank Leymann},
   title = {{Extended provisioning, security and analysis techniques for the ECHO health data management system}},
   journal = {Computing},
   publisher = {Springer},
   pages = {1--19},
   type = {Artikel in Zeitschrift},
   month = {Oktober},
   year = {2016},
   doi = {10.1007/s00607-016-0523-8},
   language = {Englisch},
   cr-category = {C.2.4 Distributed Systems,     H.2.8 Database Applications,     J.3 Life and Medical Sciences},
   ee = {http://dx.doi.org/10.1007/s00607-016-0523-8},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {eHealth gains more and more interest since a lot of end-user devices supporting health data capturing are available. The captured data has to be managed and securely stored, in order to access it from different devices and share it with other users such as physicians. The aim of the German-Greek research project ECHO is to support the treatment of patients, who suffer from chronic obstructive pulmonary disease, a chronic respiratory disease. Usually the patients need to be examined by their physicians on a regular basis due to their chronic condition. Since this is very time-consuming and expensive, we developed an eHealth system which allows the physician to monitor the patients' condition remotely, e.g., via smart phones. This article is an extension of previous work, where we introduced a health data model and an associated platform-architecture for the management and analysis of the data provided by the patients. There we have also shown how the security of the data is ensured and we explained how the platform can be provided in a cloud-based environment using the OASIS standard TOSCA, which enables a self-contained management of cloud-services. In this article we provide a more detailed description about the health data analysis, provisioning and security aspects of the eHealth system.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-18&engl=0}
}
@article {ART-2016-16,
   author = {Mathias Mormul and Pascal Hirmer and Matthias Wieland and Bernhard Mitschang},
   title = {{Situation model as interface between situation recognition and situation-aware applications}},
   journal = {Computer Science - Research and Development},
   publisher = {Springer Berlin Heidelberg},
   pages = {1--12},
   type = {Artikel in Zeitschrift},
   month = {November},
   year = {2016},
   doi = {10.1007/s00450-016-0335-2},
   keywords = {Situation; Situation-awareness; Data management; Internet of things; Context; Context-awareness},
   language = {Englisch},
   cr-category = {J.6 Computer-Aided Engineering,     H.3.1 Content Analysis and Indexing},
   ee = {http://link.springer.com/article/10.1007/s00450-016-0335-2},
   contact = {pascal.hirmer@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The upcoming of internet of things draws interest of many companies and leads to the creation of smart environments. The foundation necessary for this purpose lies in the integration of sensors, which continuously provide context data of their environment. Based on this context, changes of state in the environment, i.e., situations, can be detected. However, with the huge amount of heterogeneous context and its processing, new challenges arise. Simultaneously, the dynamic behavior of the environment demands automated mechanisms for applications to adapt to the situations automatically and in a timely manner. To meet this challenge, we present (1) the situation model as a data model for integrating all data related to situation recognition, and (2) the management and provisioning of situations based on this situation model to further decouple situation recognition and applications adapting to recognized situations. Furthermore, we present a prototypical implementation of the situation model and its management.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-16&engl=0}
}
@article {ART-2016-14,
   author = {Ana Cristina Franco da Silva and Pascal Hirmer and Matthias Wieland and Bernhard Mitschang},
   title = {{SitRS XT – Towards Near Real Time Situation Recognition}},
   journal = {Journal of Information and Data Management},
   publisher = {SBC - Brazilian Computer Society},
   volume = {7},
   number = {1},
   pages = {4--17},
   type = {Artikel in Zeitschrift},
   month = {April},
   year = {2016},
   keywords = {Complex Event Processing; Internet of Things; Situation-awareness; Situation Recognition},
   language = {Englisch},
   cr-category = {H.3 Information Storage and Retrieval,     I.5 Pattern Recognition},
   ee = {https://seer.lcc.ufmg.br/index.php/jidm/article/view/2109},
   contact = {franco-da-silva@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Nowadays, the Internet of Things gains more and more attention through cheap, highly interconnected hardware devices that are attached with sensors and actuators. This results in an instrumented environment that provides sufficient context information to drive what is called situation recognition. Situations are derived from large amounts of context data, which is difficult to handle. In this article, we present SitRS XT, an extension of our previously introduced situation recognition service SitRS, to enable situation recognition in near real time. SitRS XT provides easy to use situation recognition based on Complex Event Processing, which is highly efficient. The architecture and method of SitRS XT is described and evaluated through a prototypical implementation.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-14&engl=0}
}
@article {ART-2016-13,
   author = {Pascal Hirmer and Bernhard Mitschang},
   title = {{TOSCA4Mashups: enhanced method for on-demand data mashup provisioning}},
   journal = {Computer Science - Research and Development},
   publisher = {Springer Berlin Heidelberg},
   pages = {1--10},
   type = {Artikel in Zeitschrift},
   month = {Oktober},
   year = {2016},
   doi = {10.1007/s00450-016-0330-7},
   keywords = {Data Mashups; TOSCA; Provisioning; Cloud Computing},
   language = {Englisch},
   cr-category = {E.0 Data General,     E.1 Data Structures,     H.1 Models and Principles},
   ee = {http://link.springer.com/article/10.1007/s00450-016-0330-7},
   contact = {Pascal.Hirmer@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Nowadays, the amount of data increases tremendously. Extracting information and generating knowledge from this data is a great challenge. To cope with this issue – oftentimes referred to as big data problem – we need effective means for efficient data integration, data processing, and data analysis. To enable flexible, explorative and ad-hoc data processing, several data mashup approaches and tools have been developed in the past. One of these tools is FlexMash – a data mashup tool developed at the University of Stuttgart. By offering domain-specific graphical modeling as well as a pattern-based execution, FlexMash enables usage by a wide range of users, both domain experts and technical experts. The core idea of FlexMash is a flexible execution of data mashups using different, user-requirement-dependent execution components. In this paper, we present a new approach for on-demand, automated provisioning of these components in a cloud computing environment using the Topology and Orchestration Specification for Cloud Applications. This enables many advantages for mashup execution such as scalability, availability and cost savings.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-13&engl=0}
}
@article {ART-2016-12,
   author = {Pascal Hirmer and Matthias Wieland and Holger Schwarz and Bernhard Mitschang and Uwe Breitenb{\"u}cher and Santiago G{\'o}mez S{\'a}ez and Frank Leymann},
   title = {{Situation recognition and handling based on executing situation templates and situation-aware workflows}},
   journal = {Computing},
   publisher = {Springer},
   pages = {1--19},
   type = {Artikel in Zeitschrift},
   month = {Oktober},
   year = {2016},
   doi = {10.1007/s00607-016-0522-9},
   keywords = {Situation Recognition; IoT; Context; Integration; Cloud Computing; Workflows; Middleware},
   language = {Englisch},
   cr-category = {J.6 Computer-Aided Engineering,     H.3.1 Content Analysis and Indexing},
   ee = {http://dx.doi.org/10.1007/s00607-016-0522-9},
   contact = {pascal.hirmer@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Today, the Internet of Things has evolved due to an advanced interconnectivity of hardware devices equipped with sensors and actuators. Such connected environments are nowadays well-known as smart environments. Famous examples are smart homes, smart cities, and smart factories. Such environments should only be called ``smart'' if they allow monitoring and self-organization. However, this is a great challenge: (1) sensors have to be bound and sensor data have to be efficiently provisioned to enable monitoring of these environments, (2) situations have to be detected based on sensor data, and (3) based on the recognized situations, a reaction has to be triggered to enable self-organization, e.g., through notification delivery or the execution of workflows. In this article, we introduce SitOPT---an approach for situation recognition based on raw sensor data and automated handling of occurring situations through notification delivery or execution of situation-aware workflows. This article is an extended version of the paper ``SitRS - Situation Recognition based on Modeling and Executing Situation Templates'' presented at the 9th Symposium and Summer School of Service-oriented Computing 2015.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-12&engl=0}
}
@article {ART-2016-06,
   author = {Christoph Gr{\"o}ger and Christoph Stach and Bernhard Mitschang and Engelbert Westk{\"a}mper},
   title = {{A mobile dashboard for analytics-based information provisioning on the shop floor}},
   journal = {International Journal of Computer Integrated Manufacturing},
   publisher = {Taylor \& Francis Inc.},
   pages = {1--20},
   type = {Artikel in Zeitschrift},
   month = {Mai},
   year = {2016},
   doi = {10.1080/0951192X.2016.1187292},
   keywords = {dashboard; cockpit; process optimisation; data analytics; business intelligence; data mining},
   language = {Englisch},
   cr-category = {H.4.0 Information Systems Applications General,     J.2 Physical Sciences and Engineering},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Today's turbulent global environment requires agility and flexibility of manufacturing companies to stay competitive. Thus, employees have to monitor their performance continuously and react quickly to turbulences which demands real-time information provisioning across all hierarchy levels. However, existing manufacturing IT systems, for example, manufacturing execution systems (MES), do hardly address information needs of individual employees on the shop floor. Besides, they do not exploit advanced analytics to generate novel insights for process optimisation. To address these issues, the operational process dashboard for manufacturing (OPDM) is presented, a mobile data-mining-based dashboard for workers and supervisors on the shop floor. It enables proactive optimisation by providing analytical information anywhere and anytime in the factory. In this paper, first, user groups and conceptual dashboard services are defined. Then, IT design issues of a mobile shop floor application on top of the advanced manufacturing analytics platform are investigated in order to realise the OPDM. This comprises the evaluation of different types of mobile devices, the development of an appropriate context model and the investigation of security issues. Finally, an evaluation in an automotive industry case is presented using a prototype in order to demonstrate the benefits of the OPDM for data-driven process improvement and agility in manufacturing.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-06&engl=0}
}
@article {ART-2015-05,
   author = {Christoph Stach and Bernhard Mitschang},
   title = {{Der Secure Data Container (SDC)}},
   journal = {Datenbank-Spektrum},
   address = {Berlin, Heidelberg},
   publisher = {Springer Verlag},
   volume = {15},
   number = {2},
   pages = {109--118},
   type = {Artikel in Zeitschrift},
   month = {Juli},
   year = {2015},
   issn = {1618-2162},
   doi = {10.1007/s13222-015-0189-y},
   keywords = {Datenschutz; Schutzziele; PMP-Erweiterung; Datencontainer; Evaluation},
   language = {Deutsch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues,     D.4.6 Operating Systems Security and Protection},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Mobile Endger{\"a}te wurden zu Marc Weisers Computer des 21. Jahrhunderts, da sie als dauerhaft verf{\"u}gbare Informationsquelle Einzug in unseren Alltag gehalten haben. Auf ihnen treffen private Daten (z.B. Fotos) auf Kontextdaten (z.B. Standortdaten); verkn{\"u}pft stellen diese ein immenses Sicherheitsrisiko dar. Wie eine Vielzahl an Datendiebst{\"a}hlen belegt, reichen die existierenden Datensicherheitssysteme f{\"u}r Mobilplattformen bei weitem nicht aus. Daher bedarf es einer Identifikation m{\"o}glicher Angriffsvektoren sowie einer Analyse der speziellen Schutzziele eines solchen Systems. Darauf basierend wird die Privacy Management Platform, ein Berechtigungssystem, mithilfe des neu eingef{\"u}hrten Secure Data Containers zu einem ganzheitlichen Datensicherheitssystem erweitert. Dabei zeigt sich, dass diese Kombination alle Schutzziele erf{\"u}llt und dennoch hochperformant ist. Obwohl die vorgestellten Prototypen auf Android basieren, ist das Konzept auch auf andere App-Plattformen anwendbar.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2015-05&engl=0}
}
@article {ART-2015-03,
   author = {Stefan Silcher and Erwin Gro{\ss} and Jan K{\"o}nigsberger and J{\"o}rg Siegert and Michael Lickefett and Thomas Bauernhansl and Bernhard Mitschang},
   title = {{Mobile Fabriklayoutplanung}},
   journal = {wt Werkstattstechnik online},
   address = {D{\"u}sseldorf},
   publisher = {Springer-VDI-Verlag},
   volume = {105},
   number = {3},
   pages = {96--101},
   type = {Artikel in Zeitschrift},
   month = {M{\"a}rz},
   year = {2015},
   language = {Deutsch},
   cr-category = {J.6 Computer-Aided Engineering,     H.4.2 Information Systems Applications Types of Systems},
   ee = {http://www.werkstattstechnik.de/wt/currentarticle.php?data[article_id]=82746},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Eine stetig steigende Produktvielfalt sowie ein sich schnell {\"a}nderndes Marktumfeld erfordern neue Methoden der Planung, um auf diese Komplexit{\"a}t der M{\"a}rkte angemessen zu reagieren. Einen wichtigen Stellhebel bildet dabei die Wandlungsf{\"a}higkeit, die durch geeignete Planungsmethoden unterst{\"u}tzt und optimiert wird. Gerade in der Layoutplanung k{\"o}nnen geeignete Methoden und Systeme zu einem Wettbewerbsvorteil f{\"u}hren. Dieser Fachartikel geht am Beispiel der Lernfabrik ``advanced Industrial Engineering'' (aIE) und einer f{\"u}r das Android-Betriebssystem entwickelten App zur Layoutplanung auf die mobile Layoutplanung ``vor Ort'' in der Fabrik ein.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2015-03&engl=0}
}
@article {ART-2012-12,
   author = {Nazario Cipriani and Oliver D{\"o}rler and Bernhard Mitschang},
   title = {{Sicherer Zugriff und sichere Verarbeitung von Kontextdatenstr{\"o}men in einer verteilten Umgebung}},
   journal = {Datenbank-Spektrum ``Data Streams and Event Processing''},
   publisher = {dpunkt.verlag},
   volume = {12},
   number = {1},
   pages = {13--22},
   type = {Artikel in Zeitschrift},
   month = {M{\"a}rz},
   year = {2012},
   language = {Deutsch},
   cr-category = {K.6.5 Security and Protection,     D.4.6 Operating Systems Security and Protection},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Technologischer Fortschritt im Bereich der Mikroelektronik und Kommunikationstechnik f{\"u}hren zunehmend zu einem stark vernetzten, mit Sensoren ausgestatteten Umfeld. Die damit einhergehende stetig steigende Anzahl an Sensorinformationen, deren Daten in Form von Datenstr{\"o}men bereitgestellt werden, erm{\"o}glichen neue Anwendungsszenarien und treiben neue Verarbeitungstechniken. Im Kontext der sichverst{\"a}rkenden Durchdringung des allt{\"a}glichen Lebens mit sozialen Medien und der gleichzeitigen Auswertung von beispielsweise Positionsinformationen, w{\"a}chst die Bedeutung der Zugriffskontrolle auf Information. Die Herausforderung in diesem Zusammenhang besteht darin, Mechanismen zur Verf{\"u}gung zu stellen, die eine Regelung des Datenzugriffs erm{\"o}glichen und die Datenstromverarbeitung effizient und flexibel unterst{\"u}tzen. Diese Arbeit stellt ein flexibles Rahmenwerk zur sicheren Verarbeitung von Kontextdaten vor, das es Anbietern von Daten in Datenstromverarbeitungssystemen erm{\"o}glicht, den Zugriff und die Verarbeitung sch{\"u}tzenswerter Daten zu kontrollieren. Hierbei erm{\"o}glicht das vorgestellte Konzept im Gegensatz zu bisherigen Konzepten insbesondere den feingranularen Zugriff auf Kontextdaten.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2012-12&engl=0}
}
@article {ART-2011-19,
   author = {Bernhard Mitschang and Holger Schwarz},
   title = {{Der Lehrstuhl ``Datenbanken und Informationssysteme'' an der Universit{\"a}t Stuttgart stellt sich vor}},
   journal = {Datenbank-Spektrum},
   publisher = {Springer},
   volume = {11},
   number = {3},
   pages = {213--217},
   type = {Artikel in Zeitschrift},
   month = {November},
   year = {2011},
   language = {Deutsch},
   cr-category = {H.2 Database Management},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {In diesem Beitrag stellen wir den Lehrstuhl f{\"u}r Datenbanken und Informationssysteme der Universit{\"a}t Stuttgart unter der Leitung von Prof. Dr. Bernhard Mitschang vor. Nach einem {\"U}berblick {\"u}ber die Forschungsschwerpunkte des Lehrstuhls gehen wir auf ausgew{\"a}hlte aktuelle Forschungsprojekte ein und erl{\"a}utern die Beteiligung an der Lehre in Bachelor- und Masterstudieng{\"a}ngen.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2011-19&engl=0}
}
@article {ART-2011-14,
   author = {Peter Reimann and Holger Schwarz and Bernhard Mitschang},
   title = {{Design, Implementation, and Evaluation of a Tight Integration of Database and Workflow Engines}},
   journal = {Journal of Information and Data Management},
   editor = {Alberto H. F. Laender and Mirella M. Moro},
   publisher = {SBC - Brazilian Computer Society},
   volume = {2},
   number = {3},
   pages = {353--368},
   type = {Artikel in Zeitschrift},
   month = {Oktober},
   year = {2011},
   issn = {2178-7107},
   keywords = {Data-Intensive Workflow; Improved Local Data Processing; Scientific Workflow; Simulation Workflow},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     H.2.8 Database Applications,     H.4.1 Office Automation},
   contact = {Peter Reimann Peter.Reimann@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Accessing and processing huge amounts of heterogeneous and distributed data are some of the major challenges of data-intensive workflows. Traditionally, the descriptions of such workflows focus on their data flow. Nevertheless, control-flow-oriented workflow languages are increasingly adapted to the needs of data-intensive workflows. This provides a common level of abstraction for both data-intensive workflows and classical orchestration workflows, e.g., business workflows, which then enables a comprehensive optimization across all workflows. However, the problem still remains that workflows described in control-flow-oriented languages tend to be less efficient for data-intensive processes compared to specialized data-flow-oriented approaches. In this paper, we propose a new kind of optimization targeted at data-intensive workflows that are described in control-flow-oriented languages. We show how to improve efficiency of such workflows by introducing various techniques that partition the local data processing tasks to be performed during workflow execution in an improved way. These data processing tasks are either assigned to the workflow engine or to the tightly integrated local database engine. We evaluate the effectiveness of these techniques by means of various test scenarios.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2011-14&engl=0}
}
@article {ART-2011-12,
   author = {Jorge Minguez and Stefan Silcher and Philipp Riffelmacher and Bernhard Mitschang},
   title = {{A Service Bus Architecture for Application Integration in the Planning and Production Phases of a Product Lifecycle}},
   journal = {International Journal of Systems and Service-Oriented Engineering},
   publisher = {IGI Global},
   volume = {2},
   number = {2},
   pages = {21--36},
   type = {Artikel in Zeitschrift},
   month = {Juni},
   year = {2011},
   issn = {1947-3052},
   keywords = {Manufacturing Service Bus; Service-oriented Architecture; Product Lifecycle Management; SOA; MSB; PLM},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     D.2.13 Software Engineering Reusable Software},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Constantly changing business conditions require a high level of flexibility in business processes as well as an adaptive and fully interoperable IT infrastructure in today’s manufacturing environments. The lack of flexibility prevents manufacturing companies from improving their responsiveness and adapting their workflows to turbulent scenarios. In order to achieve highly flexible and adaptive workflows, information systems in digital factories and shop floors need to be integrated. The most challenging problem in such manufacturing environments is the high heterogeneity of the IT landscape, where the integration of legacy systems and information silos has led to chaotic architectures over the last two decades. In order to overcome this issue, the authors present a flexible integration platform that allows a loose coupling of distributed services in event-driven manufacturing environments. The proposed approach enables a flexible communication between digital factory and shop floor components by introducing a service bus architecture. This solution integrates an application-independent canonical message format for manufacturing events, content-based routing and transformation services as well as event processing workflows.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2011-12&engl=0}
}
@article {ART-2011-04,
   author = {Jorge Minguez and Philipp Riffelmacher and Bernhard Mitschang and Engelbert Westk{\"a}mper},
   title = {{Servicebasierte Integration von Produktionsanwendungen}},
   journal = {Werkstattstechnik online},
   publisher = {Springer-VDI Verlag},
   volume = {3-2011},
   pages = {128--133},
   type = {Artikel in Zeitschrift},
   month = {M{\"a}rz},
   year = {2011},
   keywords = {service-oriented architecture; SOA; ESB; manufacturing; produktion; lernfabrik; produktionsanwendungen; servicebasierte integration},
   language = {Deutsch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     D.2.13 Software Engineering Reusable Software},
   ee = {http://www.technikwissen.de/wt/currentarticle.php?data[article_id]=59574},
   contact = {jorge.minguez@gsame.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {In einem modernen Produktionsumfeld soll es m{\"o}glich sein, informationstechnische Prozesse an die sich zunehmend {\"a}ndernden Gesch{\"a}ftsbedingungen anzupassen. Um eine schnelle Anpassung zu realisieren, ist eine flexible Integration unterschiedlicher Informationssysteme erforderlich, da die Informationsfl{\"u}sse durch system{\"u}bergreifende Datenbearbeitungsprozesse gesteuert werden. Die heterogene Landschaft der digitalen Werkzeuge stellt dabei eine enorme Herausforderung dar. Der vorgestellte servicebasierte Ansatz adressiert diese Problematik.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2011-04&engl=0}
}
@article {ART-2009-30,
   author = {Nicola H{\"o}nle and Matthias Grossmann and Daniela Nicklas and Bernhard Mitschang},
   title = {{Design and implementation of a domain-aware data model for pervasive context information}},
   journal = {Computer Science - Research and Development},
   publisher = {Springer},
   volume = {24},
   number = {1-2},
   pages = {69--83},
   type = {Artikel in Zeitschrift},
   month = {September},
   year = {2009},
   language = {Englisch},
   cr-category = {H.2.1 Database Management Logical Design,     H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {We introduce a data model for a context-management middleware that enables context-aware and pervasive computing applications to transparently access available data providers and that effectively combines their data. Our approach supports new data fusion concepts for overlapping and heterogeneous data sets and thus maximizes the information presented to the application. The main part of our data model is a flexible concept for meta data that is able to represent important aspects like quality, data derivation, or temporal characteristics of data. Attributes having multiple values are utilized to represent sensor measurements histories like locations of mobile objects at different points in time. In our paper, we characterize the requirements for our data model and show that existing data models, including the (object-) relational data model and standard XML data models, do not offer the required flexibility. Therefore basic XML technology is extended to support the necessary meta data concept and multiply typed objects.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2009-30&engl=0}
}
@article {ART-2009-09,
   author = {Jing Lu and Bernhard Mitschang},
   title = {{Enforcing Data Consistency in Data Integration Systems by XQuery Trigger Service}},
   journal = {International Journal of Web Information Systems},
   publisher = {Emerald Group Publishing Limited},
   volume = {5},
   number = {2},
   pages = {1--19},
   type = {Artikel in Zeitschrift},
   month = {Mai},
   year = {2009},
   language = {Englisch},
   cr-category = {E.0 Data General},
   contact = {jinglu76@gmail.com},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Nowadays XML-based data integration systems are accepted as data service providers on the web. In order to make such a data integration system fully equipped with data manipulation capabilities, programming frameworks which support update at the integration level are being developed. When the user is permitted to submit updates, it is necessary to establish the best possible data consistency for the whole data integration system. To that end, we present an approach based on an XQuery trigger service. We define an XQuery trigger model together with its semantics. We report on the integration of the XQuery trigger service into the overall architecture and discuss details of the execution model. Experiments show that our approach provides an easy, efficient and convenient way to achieve data consistency at the global level.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2009-09&engl=0}
}
@article {ART-2009-08,
   author = {Nazario Cipriani and Daniela Nicklas and Matthias Gro{\ss}mann and Nicola H{\"o}nle and Carlos L{\"u}bbe and Bernhard Mitschang},
   title = {{Verteilte Datenstromverarbeitung von Sensordaten}},
   journal = {Datenbank-Spektrum},
   publisher = {dpunkt Verlag},
   volume = {9},
   number = {28},
   pages = {37--43},
   type = {Artikel in Zeitschrift},
   month = {Februar},
   year = {2009},
   language = {Deutsch},
   cr-category = {H.2.4 Database Management Systems,     H.2.8 Database Applications,     E.4 Data Coding and Information Theory},
   contact = {nazario.cipriani@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Technologischer Fortschritt im Bereich der Mikroelektronik und Kommunikationstechnik f{\"u}hren zunehmend zu einem stark vernetzten, mit Sensoren ausgestatteten Umfeld. Die Herausforderung, die in diesem Zusammenhang auftretenden kontinuierlichen Datenstr{\"o}me zu verwalten und effizient zu verarbeiten, sowie heterogene Sensorger{\"a}te und Netztopologien zu integrieren, ist f{\"u}r viele Anwendungsentwickler eine zu gro{\ss}e H{\"u}rde. In dieser Arbeit wird eine Middleware vorgestellt, die es einer Anwendung erm{\"o}glicht, anfragebasiert die Verarbeitung von kontinuierlichen Datenstr{\"o}men zu steuern. Die zur Verarbeitung der Daten ben{\"o}tigten Operatoren werden virtualisiert ausgef{\"u}hrt, um die gr{\"o}{\ss}tm{\"o}gliche Flexibilit{\"a}t bei der Verteilung auf die beteiligten physischen Knoten zu erreichen. Weiterhin werden Ans{\"a}tze zur Komprimierung von Datenstr{\"o}men vorgestellt, um das Gesamtvolumen der ausgetauschten Daten zwischen den Knoten zu reduzieren.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2009-08&engl=0}
}
@article {ART-2006-13,
   author = {Kurt Rothermel and Thomas Ertl and Dieter Fritsch and Paul J. K{\"u}hn and Bernhard Mitschang and Engelbert Westk{\"a}mper and Christian Becker and Dominique Dudkowski and Andreas Gutscher and Christian Hauser and Lamine Jendoubi and Daniela Nicklas and Steffen Volz and Matthias Wieland},
   title = {{SFB 627 – Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme}},
   journal = {Informatik - Forschung und Entwicklung},
   publisher = {Springer-Verlag},
   volume = {21},
   number = {1-2},
   pages = {105--113},
   type = {Artikel in Zeitschrift},
   month = {Juni},
   year = {2006},
   language = {Deutsch},
   cr-category = {C.2.4 Distributed Systems,     H.2.4 Database Management Systems,     H.2.8 Database Applications,     H.3 Information Storage and Retrieval},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/ART-2006-13/ART-2006-13.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Industrielle Fertigung und Fabrikbetrieb (IFF);     Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme;     Universit{\"a}t Stuttgart, Institut f{\"u}r Photogrammetrie (ifp);     Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Verteilte Systeme;     Universit{\"a}t Stuttgart, Institut f{\"u}r Kommunikationsnetze und Rechnersysteme (IKR)},
   abstract = {Computersysteme, wie wir sie heute kennen, passen sich typischerweise nicht an den Benutzer und dessen Situation an. Erste Beispiele von Systemen, die durch den Bezug zur Realwelt den Kontext des Benutzers einbeziehen, sind Navigationssysteme, die unter Ber{\"u}cksichtigung der Position eines Benutzers und der Verkehrslage Richtungsanweisungen geben k{\"o}nnen. Damit innovative kontextbezogene Anwendungen m{\"o}glich werden, muss der Kontext, also der Zustand der Realwelt, durch Sensoren erfasst, in das Computersystem {\"u}bermittelt und dort in Form dynamischer Umgebungsmodelle den Anwendungen zur Verf{\"u}gung gestellt werden.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2006-13&engl=0}
}
@article {ART-2004-20,
   author = {Bernhard Mitschang and Stefan Jablonski},
   title = {{Middleware-Technologien zur Systemintegration}},
   journal = {it - Information Technology},
   editor = {Oldenbourg},
   publisher = {Oldenbourg Wissenschaftsverlag},
   volume = {46},
   number = {4},
   pages = {173--174},
   type = {Artikel in Zeitschrift},
   month = {April},
   year = {2004},
   issn = {1611-2776},
   language = {Deutsch},
   cr-category = {H.2.2 Database Management Physical Design},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Middleware-Technologien zur Systemintegration},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2004-20&engl=0}
}
@article {ART-2004-14,
   author = {Bernhard Mitschang and Engelbert Westk{\"a}mper and Carmen Constantinescu and Uwe Heinkel and Benno L{\"o}ffler and Ralf Rantzau and Ralph Winkler},
   title = {{Divide et Impera: A Flexible Integration of Layout Planning and Logistics Simulation through Data Change Propagation.}},
   journal = {The CIRP - Journal of Manufacturing Systems},
   editor = {J. Peklenik},
   publisher = {CIRP},
   volume = {33},
   number = {6},
   pages = {509--518},
   type = {Artikel in Zeitschrift},
   month = {November},
   year = {2004},
   language = {Englisch},
   cr-category = {J.6 Computer-Aided Engineering,     C.2.4 Distributed Systems},
   contact = {Uwe.Heinkel@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The turbulent markets lead to new challenges for today’s enterprises, they have to be transformable to stay competitive. Therefore, we developed a new approach that integrates Logistic Simulation and Layout Planning to fulfil the goal of improving the production system. Our approach is based on propagation and transformation of data changes concerning the continuous adaptation tasks among the Layout Planning and Logistics Simulation systems. Instead of relying on a tightly integrated global data schema, we connect systems only as far as required by building “bridges” between them. The systems that participate in the integration are kept autonomous. We use several state-of-the-art XML technologies in our integration system.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2004-14&engl=0}
}
@article {ART-2003-17,
   author = {Bernhard Mitschang},
   title = {{Data propagation as an enabling technology for collaboration and cooperative information systems}},
   journal = {Computers in Industry},
   address = {Amsterdam},
   publisher = {Elsevier Science Publishers B.V.},
   volume = {52},
   pages = {59--69},
   type = {Artikel in Zeitschrift},
   month = {September},
   year = {2003},
   language = {Englisch},
   cr-category = {H.5.3 Group and Organization Interfaces},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Collaboration in cooperative information systems, as occurring in concurrent design and engineering, exploits common work and information spaces. In this paper we present the Transaction-Oriented Group and Coordination Service for Data-Centric Applications (TOGA) and the DataPropagator CHAMPAGNE that together realize a shared information space that is controlled by a basic collaboration service. Our approach enables both, firstly, the evolution of a set of separate applications to form a cooperative information system, i.e. it provides a technique towards component-oriented system engineering. Secondly, it can be exploited as a basic service within collaboration frameworks to effectively manage common work and information spaces.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2003-17&engl=0}
}
@article {ART-2003-03,
   author = {Holger Schwarz and Tobias Kraft and Ralf Rantzau and Bernhard Mitschang},
   title = {{Optimierung von Anfragesequenzen in Business-Intelligence-Anwendungen}},
   journal = {it - Information Technology},
   address = {M{\"u}nchen},
   publisher = {Oldenbourg},
   volume = {45},
   number = {4},
   pages = {196--202},
   type = {Artikel in Zeitschrift},
   month = {August},
   year = {2003},
   keywords = {Data Warehouse, Business Intelligence, Anfragesequenzen, OLAP, Data Mining},
   language = {Deutsch},
   cr-category = {H.2.4 Database Management Systems,     H.2.7 Database Administration,     H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Zur Analyse der Daten in einem Data Warehouse kommen unterschiedliche Business-Intelligence-Anwendungen zum Einsatz. Ein wichtiger Erfolgsfaktor f{\"u}r deren Nutzung ist die Effizienz, mit der die erstellten Anfragen ausgef{\"u}hrt werden. In diesem Beitrag wird zun{\"a}chst das typische Verarbeitungsszenario f{\"u}r generierte Anfragesequenzen im Bereich Business Intelligence erl{\"a}utert. Darauf aufbauend wird eine Reihe anwendungsneutraler Optimierungsstrategien erl{\"a}utert und bewertet. Anhand von Messergebnissen wird gezeigt, dass es sich insbesondere bei der Restrukturierung von Anfragesequenzen um einen vielversprechenden Ansatz handelt.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2003-03&engl=0}
}
@article {ART-2003-02,
   author = {Ralf Rantzau and Leonard Shapiro and Bernhard Mitschang and Quan Wang},
   title = {{Algorithms and Applications for Universal Quantification in Relational Databases}},
   journal = {Information Systems},
   editor = {Christian S. Jensen},
   publisher = {Elsevier},
   volume = {28},
   number = {1-2},
   pages = {3--32},
   type = {Artikel in Zeitschrift},
   month = {Januar},
   year = {2003},
   keywords = {query operators; relational division; grouping; set containment join; frequent itemset discovery},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems},
   ee = {http://www.elsevier.nl/locate/is},
   contact = {rrantzau@acm.org},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Queries containing universal quantification are used in many applications, including business intelligence applications and in particular data mining. We present a comprehensive survey of the structure and performance of algorithms for universal quantification. We introduce a framework that results in a complete classification of input data for universal quantification. Then we go on to identify the most efficient algorithm for each such class. One of the input data classes has not been covered so far. For this class, we propose several new algorithms. Thus, for the first time, we are able to identify the optimal algorithm to use for any given input dataset. These two classifications of optimal algorithms and input data are important for query optimization. They allow a query optimizer to make the best selection when optimizing at intermediate steps for the quantification problem. In addition to the classification, we show the relationship between relational division and the set containment join and we illustrate the usefulness of employing universal quantifications by presenting a novel approach for frequent itemset discovery.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2003-02&engl=0}
}
@article {ART-2002-08,
   author = {Bernhard Mitschang and Aiko Frank},
   title = {{A customizable shared information space to support concurrent design}},
   journal = {Special Issue of Computers in Industry},
   address = {Amsterdam},
   publisher = {Elsevier Science Publishers B. V.},
   volume = {48},
   number = {1},
   pages = {45--58},
   type = {Artikel in Zeitschrift},
   month = {Mai},
   year = {2002},
   issn = {0166-3615},
   language = {Englisch},
   cr-category = {H.2.2 Database Management Physical Design},
   contact = {Bernhard.Mitschang@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Sharing data is an important aspect in distributed design environments and should be supported by an underlying system. Any synchronous access to data is conflict prone. Applying concurrency control and two phase commit are one option to be considered. But design processes also demand for cooperation between the designers. Negotiation about actions on the product under design and the early exchange of preliminary results are crucial issues. Controlled data access by itself does not fulfil all the needs for cooperation. We will present a new approach that relies on a concept and system model which integrates concurrent activities by a joint information space offering flexible protocols for cooperation on the shared objects. We will describe the customizability of the protocols to effectively support different cooperative scenarios.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2002-08&engl=0}
}
@article {ART-2001-06,
   author = {Matthias Gro{\ss}mann and Alexander Leonhardi and Bernhard Mitschang and Kurt Rothermel},
   title = {{A World Model for Location-Aware Systems}},
   journal = {Informatik},
   publisher = {Schweizerischer Verband der Informatikorganisationen SVI/FSI},
   volume = {8},
   number = {5},
   pages = {22--25},
   type = {Artikel in Zeitschrift},
   month = {Oktober},
   year = {2001},
   keywords = {Mobile Computing; Location-Aware Applications; Augmented World Model},
   language = {Englisch},
   cr-category = {H.2.3 Database Management Languages,     H.3.4 Information Storage and Retrieval Systems and Software},
   ee = {http://www.nexus.uni-stuttgart.de},
   contact = {matthias.grossmann@informatik.uni-stuttgart.de, alexander.leonhardi@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Verteilte Systeme;     Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Advanced location-aware applications require a detailed model of the real world. The goal of the Nexus platform is to provide such a model together with generic functionality to a wide variety of location-aware applications. In this paper, we describe the characteristics of this Augmented World Model and the architecture of the Nexus platform. We look in more detail at the two main components responsible for the main aspects of the world model, namely the spatial data and the position information of mobile objects.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2001-06&engl=0}
}
@article {ART-2000-06,
   author = {Aiko Frank and J{\"u}rgen Sellentin and Bernhard Mitschang},
   title = {{TOGA - a customizable service for data-centric collaboration}},
   journal = {Information Systems, Data Bases: Their Creation Management and Utilization, Special Issue: The 11th International Conference on Advanced Information Systems Engineering (CAiSE*99)},
   editor = {Matthias Jarke and Dennis Shasha},
   address = {Oxford},
   publisher = {Elsevier Science Ltd.},
   volume = {25},
   number = {2},
   pages = {157--176},
   type = {Artikel in Zeitschrift},
   month = {April},
   year = {2000},
   keywords = {Concurrent Engineering; Collaboration; CSCW; Events},
   language = {Englisch},
   cr-category = {D.2.12 Software Engineering Interoperability,     H.2.4 Database Management Systems,     H.3.4 Information Storage and Retrieval Systems and Software,     H.4.1 Office Automation},
   contact = {mitsch@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Collaboration in cooperative information systems, like concurrent design and engineering, exploits common work and information spaces. In this paper we introduce the TOGA service (Transaction-Oriented Group and Coordination Service for Data-Centric Applications), which offers group management facilities and a push model for change propagation w.r.t. shared data, thus allowing for group awareness. Through TOGA’s customizability and its layered architecture the service can be adapted to a variety of different collaboration scenarios. Multiple communication protocols (CORBA, UDP/IP, TCP/IP) are supported as well as basic transaction properties. Our approach enables both, firstly, the evolution of a set of separate applications to form a cooperative information system, i.e., it provides a technique towards component-oriented system engineering. Secondly, it can be exploited as a basic service within collaboration frameworks to effectively manage common work and information spaces. In this paper we report on design issues, implementation aspects, and first experiences gained with the TOGA prototype and its exploitation in an activity coordination and collaboration framework system.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2000-06&engl=0}
}
@article {ART-1999-03,
   author = {J{\"u}rgen Sellentin and Bernhard Mitschang},
   title = {{Data-Intensive Intra- and Internet Applications: Experiences using Java and Corba in the World Wide Web}},
   journal = {Object-oriented technology in advanced applications},
   editor = {E. Bertino and S. Urban},
   address = {New York},
   publisher = {John Wiley},
   volume = {5},
   number = {3},
   pages = {181--197},
   type = {Artikel in Zeitschrift},
   month = {Oktober},
   year = {1999},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems,     H.3.5 Online Information Services},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {Data-Intensive Intra- and Internet Applications: Experiences using Java and Corba in the World Wide Web.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-1999-03&engl=0}
}
@article {ART-1998-10,
   author = {Stefan De{\ss}loch and Theo H{\"a}rder and Nelson Mattos and Bernhard Mitschang and Joachim Thomas},
   title = {{Advanced Data Processing in KRISYS: Modeling Concepts, Implementation Techniques, and Client/Server Issues}},
   journal = {VLDB Journal},
   publisher = {Springer},
   volume = {7},
   number = {2},
   pages = {79--95},
   type = {Artikel in Zeitschrift},
   month = {Mai},
   year = {1998},
   keywords = {Object-oriented modeling concepts; Consistency control; Query processing; Run-time optimization; Client/server architectures},
   language = {Englisch},
   cr-category = {H Information Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner, Anwendersoftware},
   abstract = {The increasing power of modern computers is steadily opening up new application domains for advanced data processing such as engineering and knowledge-based applications. To meet their requirements, concepts for advanced data management have been investigated during the last decade, especially in the field of object orientation. Over the last couple of years, the database group at the University of Kaiserslautern has been developing such an advanced database system, the KRISYS prototype. In this article, we report on the results and experiences obtained in the course of this project. The primary objective for the first version of KRISYS was to provide semantic features, such as an expressive data model, a set-oriented query language, deductive as well as active capabilities. The first KRISYS prototype became completely operational in 1989. To evaluate its features and to stabilize its functionality, we started to develop several applications with the system. These experiences marked the starting point for an overall redesign of KRISYS. Major goals were to tune KRISYS and its query-processing facilities to a suitable client/server environment, as well as to provide elaborate mechanisms for consistency control comprising semantic integrity constraints, multi-user synchronization, and failure recovery. The essential aspects of the resulting client/server architecture are embodied by the client-side data management needed to effectively support advanced applications and to gain the required system performance for interactive work. The project stages of KRISYS properly reflect the essential developments that have taken place in the research on advanced database systems over the last years. Hence, the subsequent discussions will bring up a number of important aspects with regard to advanced data processing that are of significant general importance, as well as of general applicability to database systems.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-1998-10&engl=0}
}
@inbook {INBOOK-2019-03,
   author = {Christoph Stach and Frank Steimle and Bernhard Mitschang},
   title = {{How to Realize Device Interoperability and Information Security in mHealth Applications}},
   booktitle = {Biomedical Engineering Systems and Technologies},
   address = {Cham},
   publisher = {Springer Nature},
   series = {Communications in Computer and Information Science},
   volume = {1024},
   pages = {213--237},
   type = {Beitrag in Buch},
   month = {August},
   year = {2019},
   isbn = {978-3-030-29195-2},
   doi = {10.1007/978-3-030-29196-9_12},
   keywords = {mHealth; Device interoperability; Information security; COPD},
   language = {Englisch},
   cr-category = {H.5.0 Information Interfaces and Presentation General,     K.6.5 Security and Protection,     K.8 Personal Computing},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {More and more people suffer from chronic diseases such as the chronic obstructive pulmonary disease (COPD). This leads to very high treatment costs every year, as such patients require a periodic screening of their condition. However, many of these checks can be performed at home by the patients themselves. This enables physicians to focus on actual emergencies. Modern smart devices such as Smartphones contribute to the success of these telemedical approaches. So-called mHealth apps combine the usability and versatility of Smartphones with the high accuracy and reliability of medical devices for home use. However, patients often face the problem of how to connect medical devices to their Smartphones (the device interoperability problem). Moreover, many patients reject mHealth apps due to the lack of control over their sensitive health data (the information security problem). In our work, we discuss the usage of the Privacy Management Platform (PMP) to solve these problems. So, we describe the structure of mHealth apps and present a real-world COPD application. From this application we derive relevant functions of an mHealth app, in which device interoperability or information security is an issue. We extend the PMP in order to provide support for these recurring functions. Finally, we evaluate the utility of these PMP extensions based on the real-world mHealth app.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2019-03&engl=0}
}
@inbook {INBOOK-2019-02,
   author = {Christoph Stach and Bernhard Mitschang},
   title = {{Elicitation of Privacy Requirements for the Internet of Things Using ACCESSORS}},
   booktitle = {Information Systems Security and Privacy},
   address = {Cham},
   publisher = {Springer Nature},
   series = {Communications in Computer and Information Science},
   volume = {977},
   pages = {40--65},
   type = {Beitrag in Buch},
   month = {Juli},
   year = {2019},
   isbn = {978-3-030-25108-6},
   doi = {10.1007/978-3-030-25109-3_3},
   keywords = {Permission model; Data-centric; Derivation transparent; Fine-grained; Context-sensitive; Internet of Things; PMP; PATRON},
   language = {Englisch},
   cr-category = {K.4.1 Computers and Society Public Policy Issues,     D.4.6 Operating Systems Security and Protection},
   contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Novel smart devices are equipped with various sensors to capture context data. The Internet of Things (IoT) connects these devices with each other in order to bring together data from various domains. Due to the IoT, new application areas emerge continuously. For instance, the quality of life and living can be significantly improved by installing connected and remote-controlled devices in Smart Homes. Likewise, the treatment of chronic diseases can be made more convenient for both patients and physicians by using Smart Health technologies. For this, however, a large amount of data has to be collected, shared, and combined. This gathered data provides detailed insights into the user of the devices. Therefore, privacy is a key issue for such IoT applications. As current privacy systems for mobile devices focus on a single device only, they cannot be applied to a distributed and highly interconnected environment such as the IoT. Therefore, we determine the special requirements for a permission model for the IoT. Based on this requirements specification, we introduce ACCESSORS, a data-centric permission model for the IoT, and describe how to apply such a model to two promising privacy systems for the IoT, namely the Privacy Management Platform (PMP) and PATRON.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2019-02&engl=0}
}
@inbook {INBOOK-2017-04,
   author = {Laura Kassner and Christoph Gr{\"o}ger and Jan K{\"o}nigsberger and Eva Hoos and Cornelia Kiefer and Christian Weber and Stefan Silcher and Bernhard Mitschang},
   title = {{The Stuttgart IT Architecture for Manufacturing}},
   series = {Enterprise Information Systems: 18th International Conference, ICEIS 2016, Rome, Italy, April 25--28, 2016, Revised Selected Papers},
   publisher = {Springer International Publishing},
   series = {Lecture Notes in Business Information Processing},
   volume = {291},
   pages = {53--80},
   type = {Beitrag in Buch},
   month = {Juni},
   year = {2017},
   isbn = {978-3-319-62386-3},
   doi = {10.1007/978-3-319-62386-3_3},
   language = {Englisch},
   cr-category = {H.4.0 Information Systems Applications General,     D.2.12 Software Engineering Interoperability,     J.2 Physical Sciences and Engineering},
   ee = {https://link.springer.com/chapter/10.1007/978-3-319-62386-3_3},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The global conditions for manufacturing are rapidly changing towards shorter product life cycles, more complexity and more turbulence. The manufacturing industry must meet the demands of this shifting environment and the increased global competition by ensuring high product quality, continuous improvement of processes and increasingly flexible organization. Technological developments towards smart manufacturing create big industrial data which needs to be leveraged for competitive advantages. We present a novel IT architecture for data-driven manufacturing, the Stuttgart IT Architecture for Manufacturing (SITAM). It addresses the weaknesses of traditional manufacturing IT by providing IT systems integration, holistic data analytics and mobile information provisioning. The SITAM surpasses competing reference architectures for smart manufacturing because it has a strong focus on analytics and mobile integration of human workers into the smart production environment and because it includes concrete recommendations for technologies to implement it, thus filling a granularity gap between conceptual and case-based architectures. To illustrate the benefits of the SITAM's prototypical implementation, we present an application scenario for value-added services in the automotive industry.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2017-04&engl=0}
}
@inbook {INBOOK-2016-01,
   author = {Pascal Hirmer and Bernhard Mitschang},
   title = {{FlexMash - Flexible Data Mashups Based on Pattern-Based Model Transformation}},
   series = {Rapid Mashup Development Tools},
   publisher = {Springer International Publishing},
   series = {Communications in Computer and Information Science},
   volume = {591},
   pages = {12--30},
   type = {Beitrag in Buch},
   month = {Februar},
   year = {2016},
   isbn = {978-3-319-28726-3},
   doi = {10.1007/978-3-319-28727-0_2},
   keywords = {ICWE rapid mashup challenge 2015; Data mashups; Transformation patterns; TOSCA; Cloud computing},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications,     H.3.0 Information Storage and Retrieval General,     E.1 Data Structures},
   ee = {http://dx.doi.org/10.1007/978-3-319-28727-0_2},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Today, the ad-hoc processing and integration of data is an important issue due to fast-growing IT systems and an increased connectivity of the corresponding data sources. The overall goal is deriving high-level information based on a huge amount of low-level data. However, an increasing amount of data leads to high complexity and many technical challenges. Non-IT expert users, in particular, are overburdened with highly complex solutions such as Extract-Transform-Load processes. To tackle these issues, we need a means to abstract from technical details and provide a flexible execution of data processing and integration scenarios. In this paper, we present an approach for modeling and pattern-based execution of data mashups based on Mashup Plans, a domain-specific mashup model that has been introduced in previous work. This non-executable model can be mapped onto different executable ones depending on the use case scenario. The concepts introduced in this paper were presented during the Rapid Mashup Challenge at the International Conference on Web Engineering 2015. This paper presents our approach, the scenario that was implemented for this challenge, as well as the issues encountered during its preparation.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2016-01&engl=0}
}
@inbook {INBOOK-2015-02,
   author = {Eva Hoos and Christoph Gr{\"o}ger and Stefan Kramer and Bernhard Mitschang},
   title = {{ValueApping: An Analysis Method to Identify Value-Adding Mobile Enterprise Apps in Business Processes}},
   series = {Enterprise Information Systems},
   publisher = {Springer International Publishing},
   series = {Lecture Notes in Business Information Processing},
   volume = {227},
   type = {Beitrag in Buch},
   month = {September},
   year = {2015},
   language = {Englisch},
   cr-category = {H.1.1 Systems and Information Theory},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Mobile enterprise apps provide novel possibilities for the optimization and redesign of business processes, e.g., by the elimination of paper-based data acquisition or ubiquitous access to up-to-date information. To leverage these business potentials, a critical success factor is the identification and evaluation of value-adding mobile enterprise apps (MEAs) based on an analysis of the business process. For this purpose, we present ValueApping, a systematic analysis method to identify usage scenarios for value-adding mobile enterprise apps in business processes and to analyze their business benefits. We describe the different analysis steps and corresponding analysis artifacts of ValueApping and discuss the results of a case-oriented evaluation in the automotive industry.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2015-02&engl=0}
}
@inbook {INBOOK-2013-02,
   author = {Sylvia Radesch{\"u}tz and Holger Schwarz and Marko Vrhovnik and Bernhard Mitschang},
   title = {{A Combination Framework for Exploiting the Symbiotic Aspects of Process and Operational Data in Business Process Optimization}},
   series = {Information Reuse and Integration in Academia and Industry},
   publisher = {Springer},
   pages = {29--49},
   type = {Beitrag in Buch},
   month = {September},
   year = {2013},
   language = {Englisch},
   cr-category = {H.2 Database Management},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {A profound analysis of all relevant business data in a company is necessary for optimizing business processes effectively. Current analyses typically run either on business process execution data or on operational business data. Correlations among the separate data sets have to be found manually with great effort. However, to achieve a more informative analysis and to fully optimize a company's business, an efficient consolidation of all major data sources is indispensable. Recent matching algorithms are insufficient for this task since they are restricted either to schema or to process matching. We present a new matching framework to (semi-)automatically combine process data models and operational data models for performing such a profound business analysis. We describe the algorithms and basic matching rules underlying this approach as well as an experimental study that shows the high recall and precision achieved.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2013-02&engl=0}
}
@inbook {INBOOK-2013-01,
   author = {Stefan Silcher and Max Dinkelmann and Jorge Minguez and Bernhard Mitschang},
   title = {{Advanced Product Lifecycle Management by Introducing Domain-Specific Service Buses}},
   series = {Enterprise Information Systems},
   publisher = {Springer Berlin Heidelberg},
   series = {Lecture Notes in Business Information Processing},
   volume = {141},
   pages = {92--107},
   type = {Beitrag in Buch},
   month = {Oktober},
   year = {2013},
   doi = {10.1007/978-3-642-40654-6_6},
   isbn = {978-3-642-40653-9 (Print), 978-3-642-40654-6 (Online)},
   keywords = {Product lifecycle management; Service-oriented architecture; Enterprise service bus; Modular IT integration},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     D.2.13 Software Engineering Reusable Software},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Manufacturing companies are operating today in a turbulent market. Constantly changing preconditions force the companies to continuously adapt their business and production processes to achieve optimal productivity. Therefore, a vast number of IT systems are introduced to support tasks along the product life cycle. These systems are typically isolated, and their communication, cooperation and, in special cases, integration result in more and more overhead and quickly become unmanageable. Further problems arise when building continuous processes within Product Lifecycle Management (PLM). The service-based PLM architecture addresses these challenges and presents a homogeneous integration approach based on Enterprise Service Bus (ESB) technology. The characteristics and findings of our approach are presented and the inclusion of security features is discussed. A proof of concept for production planning and the corresponding Production Planning Service Bus are presented. Finally, the advantages of the service-based approach compared to traditional integration solutions are pointed out.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2013-01&engl=0}
}
@inbook {INBOOK-2012-01,
   author = {Stefan Silcher and Jorge Minguez and Bernhard Mitschang},
   title = {{A Novel Approach to Product Lifecycle Management based on Service Hierarchies}},
   series = {Recent Trends in Information Reuse and Integration},
   address = {Vienna},
   publisher = {Springer},
   pages = {343--362},
   type = {Beitrag in Buch},
   month = {Januar},
   year = {2012},
   isbn = {978-3-7091-0738-6},
   keywords = {Product Lifecycle Management; Service Oriented Architecture; Enterprise Service Bus},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     D.2.13 Software Engineering Reusable Software},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {In historically grown IT infrastructures for Product Lifecycle Management (PLM), applications are mostly interconnected using point-to-point interfaces. This leads to complex and unmanageable infrastructures. A continuous and efficient integration is a key requirement for successful PLM implementations. The Service Oriented Architecture (SOA) is a prevalent solution to efficiently integrate legacy applications and systems into business processes. Its support for loose coupling of services enables the replacement of point-to-point interfaces, thereby reducing the complexity of managing and maintaining the IT infrastructure. This article introduces a SOA-based solution to the integration of all PLM phases. We employ an Enterprise Service Bus (ESB) as service-based integration and communication infrastructure and introduce three exemplary scenarios to illustrate the benefits of using an ESB as compared to alternative PLM infrastructures. Furthermore, we describe a service hierarchy that extends PLM functionality with value-added services by mapping business processes to data integration services.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2012-01&engl=0}
}
@inbook {INBOOK-2009-04,
   author = {Rodrigo Salvador Monteiro and Geraldo Zimbr{\~a}o and Jano Moreira de Souza and Holger Schwarz and Bernhard Mitschang},
   title = {{Exploring Calendar-based Pattern Mining in Data Streams}},
   series = {Complex Data Warehousing and Knowledge Discovery for Advanced Retrieval Development: Innovative Methods and Applications},
   publisher = {IGI Global},
   pages = {1--30},
   type = {Beitrag in Buch},
   month = {Juni},
   year = {2009},
   isbn = {978-1-60566-748-5},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Finally, Chapter XVI introduces a calendar-based pattern mining approach that aims at identifying patterns on specific calendar partitions in continuous data streams. The authors present how a data warehouse approach can be applied to leverage calendar-based pattern mining in data streams and how the framework of the DWFIST approach can cope with tight time constraints imposed by data streams, keep storage requirements at a manageable level and, at the same time, support calendar-based frequent itemset mining. The minimum granularity of analysis, parameters of the data warehouse (e.g. mining minimum support) and parameters of the database (e.g. extent size) provide ways to tune the load performance.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2009-04&engl=0}
}
@inbook {INBOOK-2009-01,
   author = {Cataldo Mega and Kathleen Krebs and Frank Wagner and Norbert Ritter and Bernhard Mitschang},
   title = {{Content-Management-Systeme der n{\"a}chsten Generation}},
   series = {Wissens- und Informationsmanagement ; Strategien, Organisation und Prozesse},
   address = {Wiesbaden},
   publisher = {Gabler Verlag},
   pages = {539--567},
   type = {Beitrag in Buch},
   month = {Januar},
   year = {2009},
   isbn = {978-3-8349-0937-4},
   language = {Deutsch},
   cr-category = {H.3.2 Information Storage},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {...},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2009-01&engl=0}
}
@inbook {INBOOK-2008-02,
   author = {Clemens Dorda and Uwe Heinkel and Bernhard Mitschang},
   title = {{A Concept for Applying Model-driven Engineering to Application Integration}},
   series = {Challenges In Information Technology Management},
   address = {Singapur},
   publisher = {World Scientific},
   pages = {168--174},
   type = {Beitrag in Buch},
   month = {Mai},
   year = {2008},
   isbn = {9789812819062 (ISBN-13), 9812819061 (ISBN-10)},
   keywords = {Enterprise Application Integration, Model-Driven Engineering, Software Lifecycle, EAI, MDA, MDE, UML, Unified Modeling Language},
   language = {Englisch},
   cr-category = {D.2.2 Software Engineering Design Tools and Techniques,     D.2.13 Software Engineering Reusable Software,     I.6.5 Model Development},
   contact = {Clemens.Dorda@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Modern software for Enterprise Application Integration (EAI) provides tools for modeling integration scenarios. A drawback of these tools is the lack of functionality for exchanging or integrating models from different EAI products. Consequently, developers are only partially able to describe real heterogeneous IT environments. Our goal is to avoid the creation of these so-called ``integration islands''. For that purpose we present an approach which introduces an abstract view by technology-independent and multivendor-capable modeling for both development and maintenance. With this approach, we propose a toolset- and repository-based refinement of the abstract view to automate implementation with real products and deployment on real platforms.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2008-02&engl=0}
}
@inbook {INBOOK-2006-02,
   author = {Rodrigo Salvador Monteiro and Geraldo Zimbr{\~a}o and Holger Schwarz and Bernhard Mitschang and Jano Moreira De Souza},
   title = {{DWFIST: The Data Warehouse of Frequent Itemsets Tactics Approach}},
   series = {Processing and Managing Complex Data for Decision Support},
   publisher = {Idea Group Publishing},
   pages = {1--30},
   type = {Beitrag in Buch},
   month = {April},
   year = {2006},
   isbn = {1-59140-655-2},
   language = {Englisch},
   cr-category = {H.2.7 Database Administration,     H.2.8 Database Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {This chapter presents the core of the DWFIST approach, which is concerned with supporting the analysis and exploration of frequent itemsets and derived patterns, e.g. association rules, in transactional datasets. The goal of this new approach is to provide (1) flexible pattern-retrieval capabilities without requiring the original data during the analysis phase, and (2) a standard modeling for data warehouses of frequent itemsets allowing easier development and reuse of tools for analysis and exploration of itemset-based patterns. Instead of storing the original datasets, our approach organizes frequent itemsets holding on different partitions of the original transactions in a data warehouse that retains sufficient information for future analysis. A running example for mining calendar-based patterns on data streams is presented. Staging area tasks are discussed and standard conceptual and logical schemas are presented. Properties of this standard modeling make it possible to retrieve frequent itemsets holding on any set of partitions along with upper and lower bounds on their frequency counts. Furthermore, precision guarantees for some interestingness measures of association rules are provided as well.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2006-02&engl=0}
}
@inbook {INBOOK-2005-01,
   author = {Bernhard Mitschang and Daniela Nicklas and Matthias Grossmann and Thomas Schwarz and Nicola H{\"o}nle},
   title = {{Federating Location-Based Data Services}},
   series = {Data Management in a Connected World: Essays Dedicated to Hartmut Wedekind on the Occasion of His 70th Birthday},
   address = {Berlin},
   publisher = {Springer-Verlag},
   series = {Lecture Notes in Computer Science},
   volume = {3551},
   pages = {17--35},
   type = {Beitrag in Buch},
   month = {Juni},
   year = {2005},
   isbn = {3-540-26295-4},
   keywords = {federation; context models; nexus; location-based services},
   language = {Englisch},
   cr-category = {H.2.4 Database Management Systems,     H.3.3 Information Search and Retrieval,     H.3.5 Online Information Services},
   ee = {http://www.nexus.uni-stuttgart.de,     http://dx.doi.org/10.1007/11499923_2},
   contact = {mitsch@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {With the emerging availability of small and portable devices which are able to determine their position and to communicate wirelessly, mobile and spatially-aware applications become feasible. These applications rely on information that is bound to locations and managed by so-called location-based data services. Based on a classification of location-based data services we introduce a service-oriented architecture that is built on a federation approach to efficiently support location-based applications.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2005-01&engl=0}
}
@inbook {INBOOK-2003-03,
   author = {Carmen Constantinescu and Uwe Heinkel and Ralf Rantzau and Bernhard Mitschang},
   title = {{A System For Data Change Propagation In Heterogeneous Information Systems}},
   series = {Enterprise Information Systems IV},
   address = {Dordrecht, Netherlands},
   publisher = {Kluwer Academic Publishers},
   pages = {51--59},
   type = {Beitrag in Buch},
   month = {Januar},
   year = {2003},
   isbn = {1-4020-1086-9},
   keywords = {enterprise application integration; manufacturing; repository; propagation},
   language = {Englisch},
   cr-category = {H.3.4 Information Storage and Retrieval Systems and Software},
   contact = {uwe.heinkel@informatik.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Today, it is common that enterprises manage several mostly heterogeneous information systems to supply their production and business processes with data. There is a need to exchange data between the information systems while preserving system autonomy. Hence, an integration approach that relies on a single global enterprise data schema is ruled out. This is also due to the widespread usage of legacy systems. We propose a system, called Propagation Manager, which manages dependencies between data objects stored in different information systems. A script specifying complex data transformations and other sophisticated activities, like the execution of external programs, is associated with each dependency. For example, an object update in a source system can trigger data transformations of the given source data for each destination system that depends on the object. Our system is implemented using current XML technologies. We present the architecture and processing model of our system and demonstrate the benefit of our approach by illustrating an extensive example scenario.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2003-03&engl=0}
}
@proceedings {PROC-2017-05,
   editor = {Bernhard Mitschang and Norbert Ritter and Holger Schwarz and Meike Klettke and Andreas Thor and Oliver Kopp and Matthias Wieland},
   title = {{Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2017), 17. Fachtagung des GI-Fachbereichs ``Datenbanken und Informationssysteme'' (DBIS)}},
   publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {LNI},
   volume = {P266},
   pages = {410},
   type = {Tagungsband},
   month = {M{\"a}rz},
   year = {2017},
   isbn = {978-3-88579-660-2},
   language = {Englisch},
   cr-category = {H.4.1 Office Automation},
   ee = {http://dblp.org/db/conf/btw/btw2017w.html},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme},
   abstract = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2017) -- Workshopband},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=PROC-2017-05&engl=0}
}
@proceedings {PROC-2017-04,
   editor = {Bernhard Mitschang and Daniela Nicklas and Frank Leymann and Harald Sch{\"o}ning and Melanie Herschel and Jens Teubner and Theo H{\"a}rder and Oliver Kopp and Matthias Wieland},
   title = {{Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2017), 17. Fachtagung des GI-Fachbereichs ``Datenbanken und Informationssysteme'' (DBIS)}},
   publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {LNI},
   volume = {P265},
   pages = {637},
   type = {Tagungsband},
   month = {M{\"a}rz},
   year = {2017},
   isbn = {978-3-88579-659-6},
   language = {Englisch},
   cr-category = {H.4.1 Office Automation},
   ee = {http://dblp.org/db/conf/btw/btw2017.html},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme},
   abstract = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2017)},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=PROC-2017-04&engl=0}
}
@proceedings {PROC-2011-01,
   editor = {Theo H{\"a}rder and Wolfgang Lehner and Bernhard Mitschang and Harald Sch{\"o}ning and Holger Schwarz},
   title = {{Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2011)}},
   publisher = {GI},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {760},
   type = {Tagungsband},
   month = {Februar},
   year = {2011},
   isbn = {978-3-88579-274-1},
   language = {Deutsch},
   cr-category = {H.2 Database Management},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme},
   abstract = {The ``BTW'' is a biennial conference series focusing on a broad range of topics addressing database management for Business, Technology, and Web. BTW 2011, the 14th event in this series, took place in Kaiserslautern from March 2 to 4. This volume contains 24 long and 6 short papers selected for presentation at the conference, 9 industrial contributions, 3 papers or abstracts for the invited talks, 12 demonstration proposals, a panel description, and a paper written by the winner of the dissertation award. The subject areas include core database technology such as query optimization and indexing, DBMS-related prediction models, data streams, processing of large data sets, Web-based information extraction, benchmarking and simulation, and others.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=PROC-2011-01&engl=0}
}