Universität Stuttgart

Institut für Architektur von Anwendungssystemen: Publications

Bibliography 2016 (BibTeX)

@inproceedings {INPROC-2016-48,
   author = {Jasmin Guth and Uwe Breitenb{\"u}cher and Michael Falkenthal and Frank Leymann and Lukas Reinfurt},
   title = {{Comparison of IoT Platform Architectures: A Field Study based on a Reference Architecture}},
   booktitle = {Cloudification of the Internet of Things (CIoT)},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--6},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2016},
   doi = {10.1109/CIOT.2016.7872918},
   keywords = {IoT; CPS; Reference Architecture; OpenMTC; FIWARE; SiteWhere; AWS IoT},
   language = {Englisch},
   cr-category = {C.3 Special-Purpose and Application-Based Systems,     D.2.11 Software Engineering Software Architectures},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {The Internet of Things (IoT) is gaining increasing attention. The overall aim is to interconnect the physical with the digital world. Therefore, the physical world needs to be measured and translated into processible data. Further, data has to be translated into commands to be executed by actuators. Due to the growing awareness of IoT, the number of offered IoT platforms rises as well. The heterogeneity of IoT platforms is the consequence of multiple different standards and approaches. This leads to problems of comprehension, which can occur anywhere from the design up to the selection of an appropriate solution. We tackle these issues by introducing an IoT reference architecture based on several state-of-the-art IoT platforms. Furthermore, the reference architecture is compared to three open-source and one proprietary IoT platform. The comparison shows that the reference architecture provides a uniform basis to understand, compare, and evaluate different IoT solutions. The considered state-of-the-art IoT platforms are OpenMTC, FIWARE, SiteWhere, and Amazon Web Services IoT.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-48&engl=0}
}
@inproceedings {INPROC-2016-47,
   author = {Michael Falkenthal and Johanna Barzen and Uwe Breitenb{\"u}cher and Christoph Fehling and Frank Leymann and Aristotelis Hadjakos and Frank Hentschel and Heizo Schulze},
   title = {{Leveraging Pattern Applications via Pattern Refinement}},
   booktitle = {Pursuit of Pattern Languages for Societal Change (PURPLSOC)},
   editor = {Peter Baumgartner and Tina Gruber-Muecke and Richard Sickinger},
   address = {Krems},
   publisher = {epubli GmbH},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {38--61},
   type = {Konferenz-Beitrag},
   month = {Oktober},
   year = {2016},
   keywords = {Pattern Refinement; Pattern Application; Cloud Computing Patterns; Costume Patterns},
   language = {Englisch},
   cr-category = {C.0 Computer Systems Organization, General,     D.2.2 Software Engineering Design Tools and Techniques,     D.2.3 Software Engineering Coding Tools and Techniques,     C.2.4 Distributed Systems,     D.2.7 Software Engineering Distribution, Maintenance, and Enhancement},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {In many domains, patterns are a well-established concept to capture proven solutions for frequently reoccurring problems. Patterns aim at capturing knowledge gathered from experience at an abstract level so that proven concepts can be applied to a variety of concrete, individual occurrences of the general problem. While this principle makes a pattern very reusable, it opens up a gap between the (i) captured abstract knowledge and the (ii) concrete actions required to solve a problem at hand. This often results in huge efforts that have to be spent when applying a pattern as its abstract solution has to be refined for the actual, concrete use cases each time it is applied. In this work, we present an approach to bridge this gap in order to support, guide, and ease the application of patterns. We introduce a concept that supports capturing and organizing patterns at different levels of abstraction in order to guide their refinement towards concretized solutions. To show the feasibility of the presented approach, we show how patterns detailing knowledge at different levels of abstraction in the domain of information technology are interrelated in order to ease the labor-intensive application of abstract patterns to concrete use cases. Finally, we sketch a vision of a pattern language for films, which is based on the presented concept.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-47&engl=0}
}
@inproceedings {INPROC-2016-46,
   author = {Lukas Reinfurt and Uwe Breitenb{\"u}cher and Michael Falkenthal and Frank Leymann and Andreas Riegg},
   title = {{Internet of Things Patterns}},
   booktitle = {Proceedings of the 21st European Conference on Pattern Languages of Programs (EuroPLoP)},
   publisher = {ACM},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--21},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2016},
   keywords = {Internet of Things; Design Patterns; Cyber-Physical Systems},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     C.2.4 Distributed Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {The development of the Internet of Things is gaining more and more momentum. Due to its widespread applicability, many different solutions have been created in all kinds of areas and contexts. These include solutions for building automation, industrial manufacturing, logistics and mobility, healthcare, or public utilities, for private consumers, businesses, or government. These solutions often have to deal with similar problems, for example, constrained devices, intermittent connectivity, technological heterogeneity, or privacy and security concerns. But the diversity makes it hard to grasp the underlying principles, to compare different solutions, and to design an appropriate custom implementation in the Internet of Things space. We investigated a large number of production-ready Internet of Things offerings to extract recurring proven solution principles into Patterns, of which five are presented in this paper. These Patterns address several problems. DEVICE GATEWAY shows how to connect devices to a network that do not support the network's technology. DEVICE SHADOW explains how to interact with currently offline devices. With a RULES ENGINE, you can create simple processing rules without programming. DEVICE WAKEUP TRIGGER allows you to get a disconnected device to reconnect to a network when needed. REMOTE LOCK AND WIPE can secure devices and their data in case of loss.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-46&engl=0}
}
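As a small illustration of the DEVICE SHADOW pattern named above, the following Python sketch (not taken from the paper and independent of any concrete IoT platform; the device id and state keys are made up) buffers desired state for a device that may be offline and computes the commands still to be delivered once it reconnects.

import time


class DeviceShadow:
    def __init__(self, device_id):
        self.device_id = device_id
        self.desired = {}    # state requested by applications
        self.reported = {}   # last state reported by the device
        self.last_seen = None

    def request(self, key, value):
        # Applications write to the shadow even if the device is offline.
        self.desired[key] = value

    def report(self, state):
        # Called when the device (re)connects and pushes its current state.
        self.reported.update(state)
        self.last_seen = time.time()

    def delta(self):
        # Commands still to be forwarded to the device once it is reachable.
        return {k: v for k, v in self.desired.items()
                if self.reported.get(k) != v}


shadow = DeviceShadow("lamp-42")   # hypothetical device id
shadow.request("power", "on")      # device offline: request is buffered
shadow.report({"power": "off"})    # device reconnects and reports its state
print(shadow.delta())              # -> {'power': 'on'} is sent to the device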
@inproceedings {INPROC-2016-40,
   author = {Michael Falkenthal and Uwe Breitenb{\"u}cher and K{\'a}lm{\'a}n K{\'e}pes and Frank Leymann and Michael Zimmermann and Maximilian Christ and Julius Neuffer and Nils Braun and Andreas W. Kempa-Liehr},
   title = {{OpenTOSCA for the 4th Industrial Revolution: Automating the Provisioning of Analytics Tools Based on Apache Flink}},
   booktitle = {Proceedings of the 6th International Conference on the Internet of Things},
   publisher = {ACM},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {179--180},
   type = {Demonstration},
   month = {Oktober},
   year = {2016},
   keywords = {4th Industrial Revolution; Cyber-Physical Systems; Apache Flink; Data Mock Services; Machine Learning; TOSCA},
   language = {Englisch},
   cr-category = {K.6 Management of Computing and Information Systems,     D.2.6 Software Engineering Programming Environments},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {The 4th industrial revolution entails new levels of data-driven value chain organization and management. In industrial environments, the optimization of whole production lines based on machine learning algorithms allows generating huge business value. Still, one of the open challenges is how to process the collected data as close to the data sources as possible. To fill this gap, this paper presents an OpenTOSCA-based toolchain that is capable of automatically provisioning Apache Flink as a holistic analytics environment together with specialized machine learning algorithms. This stack can be deployed as close to the production line as possible to enable data-driven optimization. Further, we demonstrate how the analytics stack can be modeled based on TOSCA to be automatically provisioned considering specific mock services to simulate machine metering in the development phase of the algorithms.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-40&engl=0}
}
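As a small illustration of the mock services for simulating machine metering mentioned in the abstract, the following Python sketch (illustrative only, not part of the described OpenTOSCA toolchain; machine id and value ranges are invented) generates synthetic sensor readings that an analytics stack could consume while the algorithms are being developed.

import random
import time


def machine_metering(machine_id, period_s=1.0, drift=0.01):
    # Yield synthetic temperature/vibration readings for one machine.
    temperature, vibration = 60.0, 0.2   # arbitrary starting values
    while True:
        temperature += random.gauss(0.0, 0.5) + drift
        vibration = max(0.0, vibration + random.gauss(0.0, 0.02))
        yield {
            "machine": machine_id,
            "timestamp": time.time(),
            "temperature_c": round(temperature, 2),
            "vibration_g": round(vibration, 3),
        }
        time.sleep(period_s)


# Consume a few readings from the hypothetical machine "press-7":
stream = machine_metering("press-7", period_s=0.0)
for _ in range(3):
    print(next(stream))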
@inproceedings {INPROC-2016-39,
   author = {Ana Cristina Franco da Silva and Uwe Breitenb{\"u}cher and K{\'a}lm{\'a}n K{\'e}pes and Oliver Kopp and Frank Leymann},
   title = {{OpenTOSCA for IoT: Automating the Deployment of IoT Applications based on the Mosquitto Message Broker}},
   booktitle = {Proceedings of the 6th International Conference on the Internet of Things (IoT)},
   address = {Stuttgart},
   publisher = {ACM},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {181--182},
   type = {Demonstration},
   month = {November},
   year = {2016},
   isbn = {978-1-4503-4814-0/16/11},
   doi = {10.1145/2991561.2998464},
   keywords = {Internet of Things; Cyber-Physical Systems; Sensor Integration; Message Broker; Mosquitto; MQTT; TOSCA},
   language = {Englisch},
   cr-category = {K.6 Management of Computing and Information Systems,     D.2.12 Software Engineering Interoperability},
   contact = {For questions, feel free to contact me franco-da-silva@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Automating the deployment of IoT applications is a complex challenge, especially if multiple heterogeneous sensors, actuators, and business components have to be integrated. This demonstration paper presents a generic, standards-based system that is able to fully automatically deploy IoT applications based on the TOSCA standard, the standardized MQTT messaging protocol, the Mosquitto message broker, and the runtime environment OpenTOSCA. We describe a demonstration scenario and explain in detail how this scenario can be deployed fully automatically using the mentioned technologies.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-39&engl=0}
}
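To make the messaging layer of the demonstration scenario more concrete, the following Python sketch shows plain MQTT publish/subscribe of the kind such a deployment wires up. It is illustrative only and not the demonstrated system itself; it assumes the paho-mqtt 1.x client API, a Mosquitto broker reachable on localhost:1883, and a made-up topic name.

import paho.mqtt.client as mqtt

TOPIC = "sensors/room1/temperature"   # made-up topic


def on_connect(client, userdata, flags, rc):
    # Called once the broker acknowledges the connection.
    client.subscribe(TOPIC)
    client.publish(TOPIC, "21.5")   # e.g. a component publishing a reading


def on_message(client, userdata, msg):
    print(msg.topic, msg.payload.decode())


client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("localhost", 1883, keepalive=60)
client.loop_forever()   # process network traffic and dispatch callbacks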
@inproceedings {INPROC-2016-38,
   author = {Andreas Wei{\ss} and Vasilios Andrikopoulos and Santiago G{\'o}mez S{\'a}ez and Michael Hahn and Dimka Karastoyanova},
   title = {{ChorSystem: A Message-Based System for the Life Cycle Management of Choreographies}},
   booktitle = {On the Move to Meaningful Internet Systems: OTM 2016 Conferences: Confederated International Conferences: CoopIS, C\&TC, and ODBASE 2016, Rhodes, Greece, October 24-28, 2016, Proceedings},
   editor = {Christophe Debruyne and Herv{\'e} Panetto and Robert Meersman and Tharam Dillon and Eva K{\"u}hn and Declan O'Sullivan and Claudio Agostino Ardagna},
   publisher = {Springer International Publishing},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {503--521},
   type = {Konferenz-Beitrag},
   month = {Oktober},
   year = {2016},
   doi = {10.1007/978-3-319-48472-3_30},
   keywords = {Collaborative Dynamic Complex (CDC) Systems; Choreography Life Cycle Management; Flexible Choreographies},
   language = {Englisch},
   cr-category = {H.4.1 Office Automation},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2016-38/INPROC-2016-38.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Service choreographies are commonly used as the means for enabling inter-organizational collaboration by providing a global view on the message exchange between involved participants. Choreographies are ideal for a number of application domains that are classified under the Collaborative, Dynamic \& Complex (CDC) systems area. System users in these application domains require facilities to control the execution of a choreography instance such as suspending, resuming or terminating, and thus actively control its life cycle. We support this requirement by introducing the ChorSystem, a system capable of managing the complete life cycle of choreographies from choreography modeling, through deployment, to execution and monitoring. The performance evaluation of the life cycle operations shows that the ChorSystem introduces an acceptable performance overhead compared to purely script-based scenarios, while gaining the abilities to control the choreography life cycle.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-38&engl=0}
}
@inproceedings {INPROC-2016-37,
   author = {Marigianna Skouradaki and Vasilios Andrikopoulos and Oliver Kopp and Frank Leymann},
   title = {{RoSE: Reoccurring Structures Detection in BPMN 2.0 Process Model Collections}},
   booktitle = {OTM Confederated International Conferences ``On the Move to Meaningful Internet Systems''},
   publisher = {Springer International Publishing},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {263--281},
   type = {Konferenz-Beitrag},
   month = {Oktober},
   year = {2016},
   doi = {10.1007/978-3-319-48472-3_15},
   keywords = {BPMN 2.0; Process similarity; Graph matching; Structural similarity; Business process management},
   language = {Englisch},
   cr-category = {D.2.9 Software Engineering Management,     I.2.8 Problem Solving, Control Methods, and Search,     F.2.2 Nonnumerical Algorithms and Problems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {The detection of structural similarities of process models is frequently discussed in the literature. The state-of-the-art approaches for structural similarities of process models presume a known subgraph that is searched in a larger graph, and utilize behavioral and textual semantics to achieve their goal. In this paper we propose an approach to detect reoccurring structures in a collection of BPMN 2.0 process models, without the knowledge of a subgraph to be searched, and by focusing solely on the structural characteristics of the process models. The proposed approach deals with the problems of subgraph isomorphism, frequent pattern discovery and maximum common subgraph isomorphism, which are mentioned as NP-hard in the literature. In this work we present a formal model and a novel algorithm for the detection of reoccurring structures in a collection of BPMN 2.0 process models. We then apply the algorithm to a collection of 1,806 real-world process models and provide a quantitative and qualitative analysis of the results.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-37&engl=0}
}
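The following Python sketch conveys the general idea of counting recurring structural fragments across a model collection. It is a drastic simplification for illustration only, not the RoSE algorithm: process models are reduced to edge lists over node types, and only typed edges and typed two-step chains (edges whose endpoint types match) are counted.

from collections import Counter
from itertools import product

# Hypothetical models: each is a list of directed edges between node types.
models = [
    [("start", "task"), ("task", "xor"), ("xor", "task"), ("task", "end")],
    [("start", "task"), ("task", "xor"), ("xor", "end")],
    [("start", "task"), ("task", "and"), ("and", "task"), ("task", "end")],
]


def fragments(edges):
    # Typed edges of one model ...
    for a, b in edges:
        yield ("edge", a, b)
    # ... and typed two-step chains: pairs of edges whose endpoint types match.
    for (a, b), (c, d) in product(edges, repeat=2):
        if b == c:
            yield ("path", a, b, d)


# Count in how many models each fragment occurs (set() = once per model).
counts = Counter(f for m in models for f in set(fragments(m)))
for fragment, n in counts.most_common(5):
    print(n, fragment)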
@inproceedings {INPROC-2016-36,
   author = {Marigianna Skouradaki and Tayyaba Azad and Uwe Breitenb{\"u}cher and Oliver Kopp and Frank Leymann},
   title = {{A Decision Support System for the Performance Benchmarking of Workflow Management Systems}},
   booktitle = {Proceedings of the 10th Symposium and Summer School On Service-Oriented Computing, SummerSOC 2016},
   publisher = {IBM},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {41--57},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2016},
   keywords = {Decision Support System; Benchmarking; Workflow Management Systems},
   language = {Englisch},
   cr-category = {H.4.1 Office Automation,     H.4.2 Information Systems Applications Types of Systems,     D.2 Software Engineering,     H.4.2 Information Systems Applications Types of Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Along with the growing popularity of Workflow Management Systems, the performance and efficiency of their underlying technology becomes crucial for the business. The development of a representative benchmark for Workflow Management Systems is very challenging, as one needs to realistically stress the different underlying components. However, structured information on how to do so is generally missing. Thus, the users need to arbitrarily make crucial design decisions or to study complex standard benchmarks before designing a benchmark. In this work, we propose a Decision Support System to ease the decision making in the design of benchmarks for Workflow Management Systems. We present the conceptual models of the Decision Support System and provide a prototypical implementation of it. Finally, we validate the functionality of our implementation with representative use cases.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-36&engl=0}
}
@inproceedings {INPROC-2016-33,
   author = {Michael Falkenthal and Uwe Breitenb{\"u}cher and Maximilian Christ and Christian Endres and Andreas W. Kempa-Liehr and Frank Leymann and Michael Zimmermann},
   title = {{Towards Function and Data Shipping in Manufacturing Environments: How Cloud Technologies leverage the 4th Industrial Revolution}},
   booktitle = {Proceedings of the 10th Advanced Summer School on Service Oriented Computing},
   publisher = {IBM Research Report},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {IBM Research Report},
   pages = {16--25},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2016},
   keywords = {cyber-physical systems; data shipping; fourth industrial revolution; function shipping; tosca; industry 4.0},
   language = {Englisch},
   cr-category = {K.6 Management of Computing and Information Systems,     D.2.13 Software Engineering Reusable Software},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Advances in the field of cloud computing and the Internet of Things are boosting the 4th industrial revolution. New research and developments foster the emergence of smart services, which augment conventional machinery to become smart cyber-physical systems. The resulting systems are characterized by providing preemptive functionality to automatically react to circumstances and changes in their physical environment. In this paper we sketch our vision of how to automatically provision smart services in manufacturing environments, whereby the paradigms of function and data shipping are specifically considered. To base this approach upon a clear understanding of influences, we point out key challenges in the context of smart services for Industry 4.0.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-33&engl=0}
}
@inproceedings {INPROC-2016-28,
   author = {C. Timurhan Sungur and Uwe Breitenb{\"u}cher and Oliver Kopp and Frank Leymann and Mozi Song and Andreas Wei{\ss} and Christoph Mayr-Dorn and Schahram Dustdar},
   title = {{Identifying Relevant Resources and Relevant Capabilities of Collaborations - A Case Study}},
   booktitle = {Proceedings of the 2016 IEEE 20th International Enterprise Distributed Object Computing Workshop (EDOCW)},
   publisher = {IEEE Computer Society},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {352--355},
   type = {Demonstration},
   month = {September},
   year = {2016},
   keywords = {Organizational performance; resource discovery; capability discovery; relevant resources; relevant capabilities; informal processes; unstructured processes},
   language = {Englisch},
   cr-category = {H.4.1 Office Automation,     H.3.3 Information Search and Retrieval,     H.3.4 Information Storage and Retrieval Systems and Software,     H.5.3 Group and Organization Interfaces},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Organizational processes involving collaborating resources, such as development processes, innovation processes, and decision-making processes, typically affect the performance of many organizations. Moreover, including required but missing resources and capabilities of collaborations can improve the performance of the corresponding processes drastically. In this work, we demonstrate the extended Informal Process Execution (InProXec) method for identifying resources and capabilities of collaborations using a case study on the Apache jclouds project.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-28&engl=0}
}
@inproceedings {INPROC-2016-27,
   author = {K{\'a}lm{\'a}n K{\'e}pes and Uwe Breitenb{\"u}cher and Santiago G{\'o}mez S{\'a}ez and Jasmin Guth and Frank Leymann and Matthias Wieland},
   title = {{Situation-Aware Execution and Dynamic Adaptation of Traditional Workflow Models}},
   booktitle = {Proceedings of the 5th European Conference on Service-Oriented and Cloud Computing (ESOCC)},
   publisher = {Springer International Publishing},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {LNCS},
   volume = {9846},
   pages = {69--83},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2016},
   doi = {10.1007/978-3-319-44482-6_5},
   language = {Englisch},
   cr-category = {H.4.1 Office Automation},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {The continuous growth of the Internet of Things together with the complexity of modern information systems results in several challenges for modeling, provisioning, executing, and maintaining systems that are capable of adapting themselves to changing situations in dynamic environments. The properties of the workflow technology, such as its recovery features, makes this technology suitable to be leveraged in such environments. However, the realization of situation-aware mechanisms that dynamically adapt process executions to changing situations is not trivial and error prone, since workflow modelers cannot reflect all possibly occurring situations in complex environments in their workflow models. In this paper, we present a method and concepts to enable modelers to create traditional, situation-independent workflow models that are automatically transformed into situation-aware workflow models that cope with dynamic contextual situations. Our work builds upon the usage of workflow fragments, which are dynamically selected during runtime to cope with prevailing situations retrieved from low-level context sensor data. We validate the practical feasibility of our work by a prototypical implementation of a Situation-aware Workflow Management System (SaWMS) that supports the presented concepts.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-27&engl=0}
}
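A toy Python sketch of the underlying idea, selecting a workflow fragment at runtime based on a situation derived from sensor data, follows. It is an assumption-laden illustration rather than the SaWMS implementation; the situation rules and the fragment registry are invented.

def derive_situation(temperature_c):
    # Toy context aggregation: classify raw sensor data into a situation.
    if temperature_c > 80:
        return "overheating"
    if temperature_c < 0:
        return "frost"
    return "normal"


# Hypothetical fragment registry: situation -> callable standing in for a fragment.
fragments = {
    "overheating": lambda: print("run cooling fragment"),
    "frost":       lambda: print("run heating fragment"),
    "normal":      lambda: print("continue with the unmodified workflow"),
}


def execute_step(sensor_value):
    situation = derive_situation(sensor_value)
    fragments[situation]()   # fragment selected at runtime, not modeled upfront


execute_step(95.0)   # -> run cooling fragment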
@inproceedings {INPROC-2016-26,
   author = {Michael Hahn and Dimka Karastoyanova and Frank Leymann},
   title = {{A Management Life Cycle for Data-Aware Service Choreographies}},
   booktitle = {Proceedings of the Twenty-Third International Conference on Web Services (ICWS 2016), San Francisco, CA, USA, 2016},
   publisher = {IEEE Computer Society},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {364--371},
   type = {Konferenz-Beitrag},
   month = {September},
   year = {2016},
   doi = {10.1109/ICWS.2016.54},
   keywords = {Choreography Management Life Cycle; Data Flow Optimization; Service Choreographies; Transparent Data Exchange},
   language = {Englisch},
   cr-category = {H.4.1 Office Automation,     C.2.4 Distributed Systems},
   contact = {Michael Hahn: michael.hahn@iaas.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {This work is motivated by the increasing importance and business value of data in the fields of business process management, scientific workflows as a field in eScience, and Internet of Things, all of which profiting from the recent advances in data science and Big data. We introduce a management life cycle that renders data as first-class citizen in service choreographies and defines the functions and artifacts necessary for enabling transparent and efficient data exchange among choreography participants. The inherent goal of the life cycle, functions and artifacts is to help decouple the data flow, data exchange and management from the control flow in service compositions and choreographies. This decoupling enables peer-to-peer data exchange in choreographies and provides the means for more sophisticated data management and exchange, as well as data exchange and provisioning optimization.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-26&engl=0}
}
@inproceedings {INPROC-2016-25,
   author = {Pascal Hirmer and Matthias Wieland and Uwe Breitenb{\"u}cher and Bernhard Mitschang},
   title = {{Dynamic Ontology-based Sensor Binding}},
   booktitle = {Advances in Databases and Information Systems. 20th East European Conference, ADBIS 2016, Prague, Czech Republic, August 28-31, 2016, Proceedings},
   address = {Prague, Czech Republic},
   publisher = {Springer International Publishing},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Information Systems and Applications, incl. Internet/Web, and HCI},
   volume = {9809},
   pages = {323--337},
   type = {Konferenz-Beitrag},
   month = {August},
   year = {2016},
   isbn = {978-3-319-44039-2},
   isbn = {978-3-319-44038-5},
   doi = {10.1007/978-3-319-44039-2},
   keywords = {Internet of Things; Sensors; Ontologies; Data Provisioning},
   language = {Englisch},
   cr-category = {E.0 Data General,     B.8 Performance and Reliability},
   ee = {http://www.springer.com/de/book/9783319440385},
   contact = {pascal.hirmer@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {In recent years, the Internet of Things has gained more and more attention through cheap hardware devices and, consequently, their increased interconnection. These devices equipped with sensors and actuators form the foundation for so-called smart environments that enable monitoring as well as self-organization. However, efficient sensor registration, binding, and sensor data provisioning is still a major issue for the Internet of Things. Usually, these steps can take up to days or even weeks due to a manual configuration and binding by sensor experts that furthermore have to communicate with domain experts that define the requirements, e.g. the types of sensors, for the smart environments. In previous work, we introduced a first vision of a method for automated sensor registration, binding, and sensor data provisioning. In this paper, we further detail and extend this vision, e.g., by introducing optimization steps to enhance efficiency as well as effectiveness. Furthermore, the approach is evaluated through a prototypical implementation.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-25&engl=0}
}
@inproceedings {INPROC-2016-24,
   author = {Alexander Bergmayr and Uwe Breitenb{\"u}cher and Oliver Kopp and Manuel Wimmer and Gerti Kappel and Frank Leymann},
   title = {{From Architecture Modeling to Application Provisioning for the Cloud by Combining UML and TOSCA}},
   booktitle = {Proceedings of the 6th International Conference on Cloud Computing and Services Science (CLOSER 2016)},
   publisher = {SCITEPRESS},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {97--108},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2016},
   doi = {10.5220/0005806900970108},
   isbn = {978-989-758-182-3},
   keywords = {TOSCA; UML; Model-Driven Software Engineering; Cloud Computing; Cloud Modeling},
   language = {Englisch},
   cr-category = {K.6 Management of Computing and Information Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Recent efforts to standardize a deployment modeling language for cloud applications resulted in TOSCA. At the same time, the software modeling standard UML supports architecture modeling from different viewpoints. Combining these standards from cloud computing and software engineering would allow engineers to refine UML architectural models into TOSCA deployment models that enable automatic provisioning of cloud applications. However, this refinement task is currently carried out manually by recreating TOSCA models from UML models because a conceptual mapping between the two languages as a basis for an automated translation is missing. In this paper, we exploit cloud modeling extensions to UML called CAML as the basis for our approach CAML2TOSCA, which aims at bridging UML and TOSCA. The validation of our approach shows that UML models can directly be injected into a TOSCA-based provisioning process. As current UML modeling tools lack cloud-based refinement support for deployment models, the added value of CAML2TOSCA is emphasized because it provides the glue between architecture modeling and application provisioning.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-24&engl=0}
}
@inproceedings {INPROC-2016-23,
   author = {Marigianna Skouradaki and Vasilis Andrikopoulos and Frank Leymann},
   title = {{Representative BPMN 2.0 Process Models Generation from Recurring Structures}},
   booktitle = {Proceedings of the 23rd IEEE International Conference on Web Services, (ICWS 2016)},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {468--475},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2016},
   keywords = {BPMN 2.0; Business Process Management; collection; composition; generation; process model; representative},
   language = {Englisch},
   cr-category = {D.2.9 Software Engineering Management,     I.2.8 Problem Solving, Control Methods, and Search,     F.2.2 Nonnumerical Algorithms and Problems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Representative process models that satisfy specific structural criteria are requested in many different use cases. However, as process models constitute a corporate asset for the companies, they are not easily shared. More particularly, when the requestor desires process models that satisfy specific structural characteristics, the task of obtaining the process models becomes even harder. This work focuses on generating synthetic, representative, executable BPMN 2.0 process models with respect to specific user-defined structural criteria. For the generation of the BPMN 2.0 process models we are using recurring sub-structures. The discovery of these sub-structures has been introduced in previous work. The generated process models will then be utilized for benchmarking purposes. The original scientific contributions of this work are to provide: a) a method for automatically generating executable representative synthetic process models for a given set of structural criteria, b) the proof-of-concept of the proposed method through prototypical implementation and c) qualitative and quantitative evaluation of the proposed approach.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-23&engl=0}
}
@inproceedings {INPROC-2016-21,
   author = {C. Timurhan Sungur and Uwe Breitenb{\"u}cher and Frank Leymann and Matthias Wieland},
   title = {{Context-sensitive Adaptive Production Processes}},
   booktitle = {Proceedings of the 48th CIRP Conference on Manufacturing Systems},
   publisher = {Elsevier},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Procedia CIRP},
   volume = {41},
   pages = {147--152},
   type = {Konferenz-Beitrag},
   month = {Februar},
   year = {2016},
   doi = {10.1016/j.procir.2015.12.076},
   keywords = {Process; Automation; Optimization; Adaptation},
   language = {Englisch},
   cr-category = {H.4.1 Office Automation,     H.5.3 Group and Organization Interfaces},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {To stay competitive, manufacturing companies need to adapt their processes on a regular basis to the most recent conditions in their corresponding domains. These adaptations are typically the result of turbulences, such as changes in human resources, new technological advancements, or economic crises. Therefore, to increase the efficiency of production processes, (i) automation, (ii) optimization, and (iii) dynamic adaptation became the most important requirements in this field. In this work, we propose a novel process modelling and execution approach for creating self-organizing processes: Production processes are extended by context-sensitive execution steps, for which sub-processes are selected, elected, optimized, and finally executed at runtime. During the election step, the most desired solution is chosen and optimized based on selection and optimization strategies of the respective processes. Moreover, we present a system architecture for modelling and executing these context-sensitive production processes.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-21&engl=0}
}
@inproceedings {INPROC-2016-19,
   author = {Frank Leymann and Christoph Fehling and Sebastian Wagner and Johannes Wettinger},
   title = {{Native Cloud Applications: Why Virtual Machines, Images and Containers Miss the Point!}},
   booktitle = {Proceedings of the 6th International Conference on Cloud Computing and Service Science (CLOSER 2016)},
   address = {Rome},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {7--15},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2016},
   keywords = {Cloud Computing; Virtualization; Cloud Migration; SOA; Microservices; Continuous Delivery},
   language = {Englisch},
   cr-category = {D.2.9 Software Engineering Management,     D.2.11 Software Engineering Software Architectures,     K.6 Management of Computing and Information Systems,     H.4.1 Office Automation},
   contact = {leymann@iaas.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Due to the current hype around cloud computing, the term ``native cloud application'' becomes increasingly popular. It suggests that an application fully benefits from all the advantages of cloud computing. Many users tend to consider their applications as cloud native if the application is just bundled in a virtual machine image or a container. Even though virtualization is fundamental for implementing the cloud computing paradigm, a virtualized application does not automatically cover all properties of a native cloud application. In this work, we propose a definition of a native cloud application by specifying the set of characteristic architectural properties, which a native cloud application has to provide. We demonstrate the importance of these properties by introducing a typical scenario from current practice that moves an application to the cloud. The identified properties and the scenario especially show why virtualization alone is insufficient to build native cloud applications. Finally, we outline how native cloud applications respect the core principles of service-oriented architectures, which are currently hyped a lot in the form of microservice architectures.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-19&engl=0}
}
@inproceedings {INPROC-2016-15,
   author = {David Richard Sch{\"a}fer and Andreas Wei{\ss} and Muhammad Adnan Tariq and Vasilios Andrikopoulos and Santiago G{\'o}mez S{\'a}ez and Lukas Krawczyk and Kurt Rothermel},
   title = {{HAWKS: A System for Highly Available Executions of Workflows}},
   booktitle = {Proceedings of the 13th IEEE International Conference on Services Computing: SCC'16; San Francisco, California, USA, June 27-July 2, 2016},
   publisher = {IEEE},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {130--137},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2016},
   doi = {10.1109/SCC.2016.24},
   keywords = {SOA; workflows; availability; replication; performance},
   language = {Englisch},
   cr-category = {C.2.4 Distributed Systems,     C.4 Performance of Systems},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2016-15/INPROC-2016-15.pdf,     http://dx.doi.org/10.1109/SCC.2016.24},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen;     Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Verteilte Systeme},
   abstract = {The workflow technology is the de facto standard for managing business processes. Today, workflows are even used for automating interactions and collaborations between business partners, e.g., for enabling just-in-time production. Every workflow that is part of such a collaboration needs to be highly available. Otherwise, the business operations, e.g., the production, might be hindered or even stopped. Since today's business partners are scattered across the globe, the workflows are executed in a highly distributed and heterogeneous environment. Those environments are, however, failure-prone and, thus, providing availability is not trivial. In this work, we improve availability by replicating workflow executions, while ensuring that the outcome is the same as in a non-replicated execution. For making workflow replication easily usable with current workflow technology, we derive the requirements for modeling a workflow replication system. Then, we propose the HAWKS system, which adheres to the previously specified requirements and is compatible with current technology. We implement a proof-of-concept in the open-source workflow execution engine Apache ODE for demonstrating this compatibility. Finally, we extensively evaluate the impact of using HAWKS in terms of performance and availability in the presence of failures.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-15&engl=0}
}
@inproceedings {INPROC-2016-14,
   author = {Vincenzo Ferme and Ana Ivanckikj and Cesare Pautasso and Marigianna Skouradaki and Frank Leymann},
   title = {{A Container-centric Methodology for Benchmarking Workflow Management Systems}},
   booktitle = {Proceedings of the 6th International Conference on Cloud Computing and Service Science, (CLOSER 2016), Rome, Italy, April 22-24, 2016},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {74--84},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2016},
   keywords = {Benchmarking; Docker Containers; Workflow Management Systems; Cloud Applications},
   language = {Englisch},
   cr-category = {C.2.4 Distributed Systems,     D.2.8 Software Engineering Metrics,     D.2.11 Software Engineering Software Architectures},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Trusted benchmarks should provide reproducible results obtained following a transparent and well-defined process. In this paper, we show how Containers, originally developed to ease the automated deployment of Cloud application components, can be used in the context of a benchmarking methodology. The proposed methodology focuses on Workflow Management Systems (WfMSs), a critical service orchestration middleware, which can be characterized by its architectural complexity, for which Docker Containers offer a highly suitable approach. The contributions of our work are: 1) a new benchmarking approach taking full advantage of containerization technologies; and 2) the formalization of the interaction process with the WfMS vendors described clearly in a written agreement. Thus, we take advantage of emerging Cloud technologies to address technical challenges, ensuring the performance measurements can be trusted. We also make the benchmarking process transparent, automated, and repeatable so that WfMS vendors can join the benchmarking effort.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-14&engl=0}
}
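As an illustration of the container-centric setup (not the benchmarking framework itself), the following Python sketch starts and tears down a containerized workflow engine with the Docker SDK for Python; it assumes a local Docker daemon, a hypothetical image name, and a hypothetical port mapping.

import docker

client = docker.from_env()

wfms = client.containers.run(
    "vendor/wfms:latest",          # image provided by the WfMS vendor (assumed)
    name="wfms-under-test",
    ports={"8080/tcp": 8080},      # expose the engine's API to the load driver
    detach=True,
)

try:
    # ... the load driver would issue workflow instances against localhost:8080 ...
    print(wfms.logs(tail=10).decode())
finally:
    wfms.stop()
    wfms.remove()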
@inproceedings {INPROC-2016-08,
   author = {Sebastian Wagner and Uwe Breitenb{\"u}cher and Frank Leymann},
   title = {{A Method For Reusing TOSCA-based Applications and Management Plans}},
   booktitle = {Proceedings of the 6th International Conference on Cloud Computing and Service Science (CLOSER 2016)},
   address = {Rome},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {181--191},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2016},
   language = {Englisch},
   cr-category = {D.2.9 Software Engineering Management,     K.6 Management of Computing and Information Systems,     H.4.1 Office Automation},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {The automated provisioning and management of Cloud applications is supported by various general-purpose technologies that provide generic management functionalities such as scaling components or automatically redeploying parts of a Cloud application. However, if complex applications have to be managed, these technologies reach their limits and individual, application-specific processes must be created to automate the execution of holistic management tasks that cannot be implemented in a generic manner. Unfortunately, creating such processes from scratch is time-consuming, error-prone, and knowledge-intensive, thus, leading to inefficient developments of new applications. In this paper, we present an approach that tackles these issues by enabling the usage of choreographies to systematically combine available management workflows of existing application building blocks. Moreover, we show how these choreographies can be merged into single, executable workflows in order to enable their automated execution. To validate the approach, we apply the concept to the choreography language BPEL4Chor and the Cloud standard TOSCA. In addition, we extend the Cloud application management ecosystem OpenTOSCA to support executing management choreographies.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-08&engl=0}
}
@inproceedings {INPROC-2016-05,
   author = {Marigianna Skouradaki and Vincenzo Ferme and Cesare Pautasso and Frank Leymann and Andr{\'e} van Hoorn},
   title = {{Micro-Benchmarking BPMN 2.0 Workflow Management Systems with Workflow Patterns}},
   booktitle = {28th International Conference, CAiSE 2016, Ljubljana, Slovenia, June 13-17, 2016, Proceedings, Springer Lecture Notes in Computer Science},
   publisher = {Springer},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {67--82},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2016},
   keywords = {Benchmarking; Microbenchmark; Workflow Engine; BPMN 2.0; Workflow Patterns; Workflow Management Systems},
   language = {Englisch},
   cr-category = {D.2.2 Software Engineering Design Tools and Techniques,     D.2.8 Software Engineering Metrics,     D.4.8 Operating Systems Performance},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Although Workflow Management Systems (WfMSs) are a key component in workflow technology, research work for assessing and comparing their performance is limited. This work proposes the first micro-benchmark for WfMSs that can execute BPMN 2.0 workflows. To this end, we focus on studying the performance impact of well-known workflow patterns expressed in BPMN 2.0 with respect to three open source WfMSs (i.e., Activiti, jBPM and Camunda). We executed all the experiments under a reliable environment and produced a set of meaningful metrics. This paper contributes to the area of workflow technology by defining building blocks for more complex BPMN 2.0 WfMS benchmarks. The results have shown bottlenecks on architectural design decisions, resource utilization, and limits on the load a WfMS can sustain, especially for the cases of complex and parallel structures. Experiments on a mix of workflow patterns indicated that there are no unexpected performance side effects when executing different workflow patterns concurrently, although the duration of the individual workflows that comprised the mix was increased.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-05&engl=0}
}
@inproceedings {INPROC-2016-04,
   author = {Michael Hahn and Dimka Karastoyanova and Frank Leymann},
   title = {{Data-Aware Service Choreographies through Transparent Data Exchange}},
   booktitle = {Proceedings of the 16th International Conference on Web Engineering (ICWE'16)},
   publisher = {Springer International Publishing},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Lecture Notes in Computer Science (LNCS)},
   volume = {9671},
   pages = {357--364},
   type = {Konferenz-Beitrag},
   month = {Juni},
   year = {2016},
   doi = {10.1007/978-3-319-38791-8_20},
   keywords = {Service Choreographies; Transparent Data Exchange; Decentralized Data Flow; Data Flow Optimization},
   language = {Englisch},
   cr-category = {H.4.1 Office Automation,     C.2.4 Distributed Systems},
   contact = {Michael Hahn: michael.hahn@iaas.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Our focus in this paper is on enabling the decoupling of data flow, data exchange and management from the control flow in service compositions and choreographies through novel middleware abstractions and realization. This allows us to perform the data flow of choreographies in a peer-to-peer fashion decoupled from their control flow. Our work is motivated by the increasing importance and business value of data in the fields of business process management, scientific workflows and the Internet of Things, all of which profiting from the recent advances in data science and Big data. Our approach comprises an application life cycle that inherently introduces data exchange and management as a first-class citizen and defines the functions and artifacts necessary for enabling transparent data exchange. Moreover, we present an architecture of the supporting system that contains the Transparent Data Exchange middleware which enables the data exchange and management on behalf of service choreographies and provides methods for the optimization of the data exchange during the execution of service choreographies.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-04&engl=0}
}
@inproceedings {INPROC-2016-03,
   author = {Johanna Barzen and Michael Falkenthal and Frank Hentschel and Frank Leymann and Tino Strehl},
   title = {{{\"A}hnlichkeitssuche in den Digital Humanities: Semi-automatische Identifikation von Kost{\"u}mmustern}},
   booktitle = {Konferenzabstracts DHd 2016 ``Modellierung - Vernetzung – Visualisierung: Die Digital Humanities als f{\"a}cher{\"u}bergreifendes Forschungsparadigma''},
   editor = {Elisabeth Burr},
   address = {Leipzig},
   publisher = {nisaba verlag},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {271--273},
   type = {Konferenz-Beitrag},
   month = {M{\"a}rz},
   year = {2016},
   isbn = {978-3-941379-05-3},
   keywords = {Costuem-Language; Data-Visualization; Pattern Research; Visual Data Mining; Kost{\"u}m Muster; vestiment{\"a}re Kommunikation},
   language = {Deutsch},
   cr-category = {D.2.2 Software Engineering Design Tools and Techniques,     J.5 Arts and Humanities},
   contact = {johanna.barzen@web.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Kost{\"u}me in Filmen sind ein wichtiges Gestaltungselement der diegetischen Welt. Mit MUSE (MUster Suchen und Erkennen) verfolgen wir das Ziel, Konventionen zu identifizieren und zu Mustern zu abstrahieren, die sich entwickelt haben, um Kost{\"u}me als kommunikatives, bedeutungstragendes Element zu nutzen. Hier m{\"o}chten wir vorstellen, wie man die taxonomische Struktur der Daten nutzen kann, um diese nach ihrer {\"A}hnlichkeit hin selektiv auswerten und zu visualisieren, um Hinweise auf m{\"o}gliche Kost{\"u}mmuster zu erhalten.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-03&engl=0}
}
@inproceedings {INPROC-2016-02,
   author = {Vasilios Andrikopoulos and Marina Bitsaki and Santiago G{\'o}mez S{\'a}ez and Michael Hahn and Dimka Karastoyanova and Giorgios Koutras and Alina Psycharaki},
   title = {{Evaluating the Effect of Utility-based Decision Making in Collective Adaptive Systems}},
   booktitle = {Proceedings of the 6th International Conference on Cloud Computing and Service Science (CLOSER 2016)},
   address = {Rome, Italy},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1--10},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2016},
   language = {Englisch},
   cr-category = {D.2.0 Software Engineering General,     D.2.11 Software Engineering Software Architectures,     D.2.12 Software Engineering Interoperability},
   contact = {Vasilios Andrikopoulos: andrikopoulos@iaas.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Utility, defined as the perceived satisfaction with a service, provides the ideal means for decision making on the level of individual entities and collectives participating in a large-scale dynamic system. Previous works have already introduced the concept into the area of collective adaptive systems, and have discussed what is the necessary infrastructure to support the realization of the involved theoretical concepts into actual decision making. In this work we focus on two aspects. First, we provide a concrete utility model for a case study that is part of a larger research project. Second, we incorporate this model into our implementation of the proposed architecture. More importantly, we design and execute an experiment that aims to empirically evaluate the use of utility for decision making by comparing it against simpler decision making mechanisms.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-02&engl=0}
}
@inproceedings {INPROC-2016-01,
   author = {Santiago G{\'o}mez S{\'a}ez and Vasilios Andrikopoulos and Frank Leymann},
   title = {{Consolidation of Performance and Workload Models in Evolving Cloud Application Topologies}},
   booktitle = {Proceedings of the 6th International Conference on Cloud Computing and Service Science (CLOSER 2016)},
   address = {Rome, Italy},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {160--169},
   type = {Konferenz-Beitrag},
   month = {April},
   year = {2016},
   language = {Englisch},
   cr-category = {C.2.4 Distributed Systems,     D.2.8 Software Engineering Metrics,     D.2.11 Software Engineering Software Architectures},
   contact = {Santiago G{\'o}mez S{\'a}ez: santiago.gomez-saez@iaas.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {The increase of available Cloud services and providers has contributed to accelerating the development and has broadened the possibilities for building and provisioning Cloud applications in heterogeneous Cloud environments. The necessity for satisfying business and operational requirements in an agile and rapid manner has created the need for adapting traditional methods and tooling support for building and provisioning Cloud applications. Focusing on the application's performance and its evolution, we observe a lack of support for specifying, capturing, analyzing, and reasoning on the impact of using different Cloud services and configurations. This paper bridges such a gap by proposing the conceptual and tooling support to enhance Cloud application topology models to capture and analyze the evolution of the application's performance. The tooling support is built upon an existing modeling environment, which is subsequently evaluated using the MediaWiki (Wikipedia) application and its realistic workload.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-01&engl=0}
}
@article {ART-2016-26,
   author = {Uwe Breitenb{\"u}cher and Christian Endres and K{\'a}lm{\'a}n K{\'e}pes and Oliver Kopp and Frank Leymann and Sebastian Wagner and Johannes Wettinger and Michael Zimmermann},
   title = {{The OpenTOSCA Ecosystem - Concepts \& Tools}},
   journal = {European Space project on Smart Systems, Big Data, Future Internet - Towards Serving the Grand Societal Challenges - Volume 1: EPS Rome 2016},
   publisher = {SciTePress},
   pages = {112--130},
   type = {Artikel in Zeitschrift},
   month = {Dezember},
   year = {2016},
   isbn = {978-989-758-207-3},
   doi = {10.5220/0007903201120130},
   keywords = {TOSCA; OpenTOSCA; Orchestration; Management; Cloud},
   language = {Englisch},
   cr-category = {D.2.2 Software Engineering Design Tools and Techniques,     D.2.9 Software Engineering Management},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Automating the provisioning and management of Cloud applications is one of the most important issues in Cloud Computing. The Topology and Orchestration Specification for Cloud Applications (TOSCA) is an OASIS standard for describing Cloud applications and their management in a portable and interoperable manner. TOSCA enables modeling the application's structure in the form of topology models and employs the concept of executable management plans to describe all required management functionality regarding the application. In this paper, we give an overview of TOSCA and the OpenTOSCA Ecosystem, which is an implementation of the TOSCA standard. The ecosystem consists of standard-compliant tools that enable modeling application topology models and automating the provisioning and management of the modeled applications.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-26&engl=0}
}
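The abstract of ART-2016-26 above refers to TOSCA topology models and automated provisioning. The following is a minimal, illustrative sketch in plain Python (not TOSCA's XML syntax, and not code from the OpenTOSCA Ecosystem): a toy topology of node templates wired by hostedOn/connectsTo relationship templates, from which a declarative provisioning order is derived by topological sorting. All node and relationship names are invented for illustration.

# Node templates and relationship templates of a toy topology model
# (names invented for illustration; real TOSCA models are XML/CSAR-based).
node_templates = ["VM", "Tomcat", "WebApp", "MySQL-DBMS", "MySQL-DB"]
relationship_templates = [
    ("WebApp", "hostedOn", "Tomcat"),
    ("Tomcat", "hostedOn", "VM"),
    ("MySQL-DB", "hostedOn", "MySQL-DBMS"),
    ("MySQL-DBMS", "hostedOn", "VM"),
    ("WebApp", "connectsTo", "MySQL-DB"),
]

def provisioning_order(nodes, relationships):
    """Topologically sort the node templates so that the target of every
    relationship is provisioned before its source."""
    pending = {n: set() for n in nodes}
    for source, _rel_type, target in relationships:
        pending[source].add(target)
    order = []
    while pending:
        ready = [n for n, deps in pending.items() if not deps]
        if not ready:
            raise ValueError("cyclic topology")
        order.extend(ready)
        for n in ready:
            del pending[n]
        for deps in pending.values():
            deps.difference_update(ready)
    return order

print(provisioning_order(node_templates, relationship_templates))
# one valid order: VM first, then Tomcat and MySQL-DBMS, WebApp last

Deriving the order from the topology is what makes the declarative style attractive: the model alone determines which components must exist before others can be installed, while imperative management plans spell out the individual steps explicitly.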
@article {ART-2016-22,
   author = {Johannes Wettinger and Uwe Breitenb{\"u}cher and Michael Falkenthal and Frank Leymann},
   title = {{Collaborative Gathering and Continuous Delivery of DevOps Solutions through Repositories}},
   journal = {Computer Science - Research and Development},
   publisher = {Springer},
   type = {Artikel in Zeitschrift},
   month = {November},
   year = {2016},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     C.2.4 Distributed Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Collaboration is a key aspect for establishing DevOps-oriented processes because diverse experts such as developers and operations personnel need to efficiently work together to deliver applications. For this purpose, highly automated continuous delivery pipelines are established, consisting of several stages and their corresponding application environments (development, test, production, etc.). The DevOps community provides a huge variety of tools and reusable artifacts (i.e. DevOps solutions such as deployment engines, configuration definitions, container images, etc.) to implement such application environments. This paper presents the concept of collaborative solution repositories, which are based on established software engineering practices. This helps to systematically maintain and link diverse solutions. We further discuss how the discovery and capturing of such solutions can be automated. To utilize this knowledge (consisting of linked DevOps solutions), we apply continuous delivery principles to create diverse knowledge base instances through corresponding pipelines. Finally, an integrated architecture is outlined and validated using a prototype implementation.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-22&engl=0}
}
@article {ART-2016-20,
   author = {Karolina Vukojevic-Haupt and Florian Haupt and Frank Leymann},
   title = {{On-demand provisioning of workflow middleware and services into the cloud: an overview}},
   journal = {Computing},
   publisher = {Springer Wien},
   type = {Artikel in Zeitschrift},
   month = {Oktober},
   year = {2016},
   doi = {10.1007/s00607-016-0521-x},
   note = {full-text view-only version: http://rdcu.be/lL8H},
   keywords = {on-demand provisioning; cloud; service-oriented computing; eScience; dynamic provisioning; SOC; automatic provisioning; automatic deployment},
   language = {Englisch},
   cr-category = {C.2.4 Distributed Systems,     D.2.11 Software Engineering Software Architectures,     I.6.7 Simulation Support Systems},
   contact = {karolina.vukojevic@iaas.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {One of the core principles in service-oriented computing is that services are always on and available. There are, however, domains where running services all the time is not suitable, for example when applying simulation workflows in the eScience domain. The simulation services orchestrated by these workflows are typically used only rarely and irregularly; keeping them running all the time would result in a significant waste of resources. As a consequence, we developed the approach of on-demand provisioning of workflow middleware and services. In this paper we give an overview of our work. We present the motivation and main idea of our solution approach and also provide details about some of the results of our work. The overview of our previous and current work is then complemented by a detailed discussion and comparison of the roles involved in both concepts: traditional service-oriented computing as well as our newly developed on-demand provisioning approach.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-20&engl=0}
}
@article {ART-2016-19,
   author = {Marina Bitsaki and Christos Koutras and George Koutras and Frank Leymann and Frank Steimle and Sebastian Wagner and Matthias Wieland},
   title = {{ChronicOnline: Implementing a mHealth solution for monitoring and early alerting in chronic obstructive pulmonary disease}},
   journal = {Health Informatics Journal},
   publisher = {Sage Publications},
   pages = {1--10},
   type = {Artikel in Zeitschrift},
   month = {April},
   year = {2016},
   doi = {10.1177/1460458216641480},
   keywords = {chronic obstructive pulmonary disease; cloud computing; health services; mobile applications; monitoring},
   language = {Englisch},
   cr-category = {C.2.4 Distributed Systems,     H.2.8 Database Applications,     J.3 Life and Medical Sciences},
   ee = {http://jhi.sagepub.com/content/early/2016/04/16/1460458216641480.full.pdf+html},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Lack of time or economic difficulties prevent chronic obstructive pulmonary disease patients from communicating regularly with their physicians, thus inducing exacerbation of their chronic condition and possible hospitalization. Enhancing Chronic patients' Health Online proposes a new, sustainable and innovative business model that provides, at low cost and at significant savings to the national health system, a preventive health service for chronic obstructive pulmonary disease patients, by combining human medical expertise with state-of-the-art online service delivery based on cloud computing, service-oriented architecture, data analytics, and mobile applications. In this article, we implement the frontend applications of the Enhancing Chronic patients' Health Online system and describe their functionality and the interfaces available to the users.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-19&engl=0}
}
@article {ART-2016-17,
   author = {Johanna Barzen and Uwe Breitenb{\"u}cher and Linus Eusterbrock and Michael Falkenthal and Frank Hentschel and Frank Leymann},
   title = {{The vision for MUSE4Music. Applying the MUSE method in musicology}},
   journal = {Computer Science - Research and Development},
   address = {Heidelberg},
   publisher = {Springer},
   pages = {1--6},
   type = {Artikel in Zeitschrift},
   month = {November},
   year = {2016},
   doi = {10.1007/s00450-016-0336-1},
   keywords = {Pattern Language; Pattern; Digital Humanities; Musical patterns; Mining; Musical expressivity},
   language = {Englisch},
   cr-category = {H.3.3 Information Search and Retrieval,     I.5.2 Pattern Recognition Design Methodology,     J.5 Arts and Humanities},
   contact = {Johanna Barzen johanna\_barzen@iaas.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Investigating the emotional impact of historical music, e.g. music of the 19th century, is a complex challenge since the subjects that listened to this music and their emotions are forever gone. As a result, asking them for their experiences is not possible anymore and we need other means to gain insights into the expressive quality of music of this century. In this vision paper, we describe a pattern-based method called MUSE4Music to quantitatively find similarities in different pieces of music. The reconstruction of musical patterns will allow us to draw conclusions from erratic documents that go far beyond the single pieces they are referring to.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-17&engl=0}
}
@article {ART-2016-15,
   author = {Michael Falkenthal and Johanna Barzen and Uwe Breitenb{\"u}cher and Sascha Br{\"u}gmann and Daniel Joos and Frank Leymann and Michael Wurster},
   title = {{Pattern Research in the Digital Humanities: How Data Mining Techniques Support the Identification of Costume Patterns}},
   journal = {Computer Science - Research and Development},
   publisher = {Springer},
   type = {Artikel in Zeitschrift},
   month = {November},
   year = {2016},
   language = {Englisch},
   cr-category = {H.2.8 Database Applications,     H.3.3 Information Search and Retrieval},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Costumes play a prominent role in conveying a character's mood, a certain stereotype, or a character trait in a film. The concept of patterns, applied to the domain of costumes in films, can help costume designers to improve their work by capturing knowledge and experience about proven solutions for recurring design problems. However, finding such Costume Patterns is a difficult and time-consuming task, because possibly hundreds of different costumes from a huge number of films have to be analyzed to find commonalities. In this paper, we present a Semi-Automated Costume Pattern Mining Method to discover indicators for Costume Patterns from a large data set of documented costumes using data mining and data warehouse techniques. We validate the presented approach with a prototypical implementation that builds upon the Apriori algorithm for mining association rules and standard data warehouse technologies.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-15&engl=0}
}
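ART-2016-15 above mentions that the prototype builds upon the Apriori algorithm for mining association rules. The following is a minimal, self-contained Apriori sketch in Python, assuming each documented costume is encoded as a set of attribute values; the attribute values and thresholds below are invented for illustration and are not taken from the MUSE costume data set.

from itertools import combinations

def apriori(transactions, min_support):
    """Return all frequent itemsets (frozensets) mapped to their support."""
    transactions = [frozenset(t) for t in transactions]
    n = len(transactions)

    def support(itemset):
        return sum(1 for t in transactions if itemset <= t) / n

    candidates = {frozenset([item]) for t in transactions for item in t}
    frequent = {c: support(c) for c in candidates if support(c) >= min_support}
    result = dict(frequent)
    k = 2
    while frequent:
        # join step: combine frequent (k-1)-itemsets into k-item candidates
        # (the classic prune step is omitted for brevity; the support check
        # below still keeps the result correct)
        prev = list(frequent)
        candidates = {a | b for a in prev for b in prev if len(a | b) == k}
        frequent = {c: support(c) for c in candidates if support(c) >= min_support}
        result.update(frequent)
        k += 1
    return result

def association_rules(frequent, min_confidence):
    """Derive rules X -> Y with confidence = support(X u Y) / support(X)."""
    rules = []
    for itemset, supp in frequent.items():
        for r in range(1, len(itemset)):
            for antecedent in map(frozenset, combinations(itemset, r)):
                confidence = supp / frequent[antecedent]  # subsets of frequent sets are frequent
                if confidence >= min_confidence:
                    rules.append((set(antecedent), set(itemset - antecedent), confidence))
    return rules

# Toy usage: each "transaction" is the attribute set of one documented costume
# (attribute values are invented for illustration).
costumes = [
    {"trench coat", "fedora", "dark palette"},
    {"trench coat", "fedora", "cigarette"},
    {"trench coat", "dark palette"},
    {"leather jacket", "sunglasses"},
]
frequent = apriori(costumes, min_support=0.5)
for lhs, rhs, conf in association_rules(frequent, min_confidence=0.8):
    print(lhs, "->", rhs, round(conf, 2))

Rules such as {"fedora"} -> {"trench coat"} are exactly the kind of co-occurrence indicator that the paper's mining step surfaces as candidate evidence for a Costume Pattern; deciding whether a rule really constitutes a pattern remains a manual, domain-expert step.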
@article {ART-2016-09,
   author = {Johannes Wettinger and Vasilios Andrikopoulos and Frank Leymann and Steve Strauch},
   title = {{Middleware-oriented Deployment Automation for Cloud Applications}},
   journal = {IEEE Transactions on Cloud Computing},
   publisher = {IEEE},
   type = {Artikel in Zeitschrift},
   month = {Februar},
   year = {2016},
   issn = {2168-7161},
   doi = {10.1109/TCC.2016.2535325},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     C.2.4 Distributed Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Fully automated provisioning and deployment of applications is one of the most essential prerequisites to make use of the benefits of Cloud computing in order to reduce the costs for managing applications. A huge variety of approaches, tools, and providers are available to automate the involved processes. The DevOps community, for instance, provides tooling and reusable artifacts to implement deployment automation in an application-oriented manner. Platform-as-a-Service frameworks are available for the same purpose. In this work we systematically classify and characterize available deployment approaches independently from the underlying technology used. For motivation and evaluation purposes, we choose Web applications with different technology stacks and analyze their specific deployment requirements. Afterwards, we provision these applications using each of the identified types of deployment approaches in the Cloud to perform qualitative and quantitative measurements. Finally, we discuss the evaluation results and derive recommendations to decide which deployment approach to use based on the deployment requirements of an application. Our results show that deployment approaches can also be efficiently combined if there is no 'best fit' for a particular application.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-09&engl=0}
}
@article {ART-2016-08,
   author = {Johannes Wettinger and Uwe Breitenb{\"u}cher and Frank Leymann},
   title = {{Enhancing Cloud Application DevOps Using Dynamically Tailored Deployment Engines}},
   journal = {Services Transactions on Cloud Computing},
   publisher = {Online},
   volume = {4},
   number = {1},
   pages = {1--15},
   type = {Artikel in Zeitschrift},
   month = {Januar},
   year = {2016},
   issn = {2326-7550},
   language = {Englisch},
   cr-category = {D.2.11 Software Engineering Software Architectures,     C.2.4 Distributed Systems},
   ee = {http://hipore.com/stcc/2016/IJCC-Vol4-No1-2016b.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {Shortening software release cycles is increasingly becoming a critical competitive advantage as today's users, customers, and other stakeholders expect quick responses to occurring issues and feature requests. DevOps practices and Cloud computing are two key paradigms to tackle these issues by enabling rapid and continuous delivery of applications, utilizing automated software delivery pipelines. However, it is a complex and sophisticated challenge to implement such pipelines by installing, configuring, orchestrating, and integrating the required deployment automation solutions. Therefore, we present a method in conjunction with a framework and implementation to dynamically generate tailored deployment automation engines for specific application stacks, which are packaged in a portable manner to run them on various platforms and infrastructures. The core of our work is based on generating APIs for arbitrary deployment executables such as scripts and plans that perform different tasks in the automated deployment process. As a result, deployment tasks can be triggered through generated API endpoints, abstracting from lower-level, technical details of diverse deployment automation tooling. Besides a quantitative evaluation, we discuss two case studies in this context: one focusing on microservice architectures, the other considering application functionality and its relation to deployment functionality.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-08&engl=0}
}
@inbook {INBOOK-2016-05,
   author = {Johannes Wettinger and Uwe Breitenb{\"u}cher and Frank Leymann},
   title = {{Streamlining APIfication by Generating APIs for Diverse Executables Using Any2API}},
   booktitle = {Cloud Computing and Services Science},
   publisher = {Springer International Publishing},
   series = {Communications in Computer and Information Science},
   volume = {581},
   pages = {216--238},
   type = {Beitrag in Buch},
   month = {Februar},
   year = {2016},
   doi = {10.1007/978-3-319-29582-4_12},
   isbn = {978-3-319-29581-7},
   keywords = {API; APIfication; Service; Web; REST; DevOps; Deployment; Cloud computing},
   language = {Englisch},
   cr-category = {D.2.12 Software Engineering Interoperability,     C.2.4 Distributed Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {For many of today's systems, diverse application and management functionality is exposed by APIs to be used for integration and orchestration purposes. One important use case is the implementation of fully automated deployment processes that are utilized to create instances of Web applications or back-ends for mobile apps. Not all functionality that needs to be integrated in this context is exposed through APIs natively: such processes typically require a multitude of other heterogeneous technologies such as scripting languages and deployment automation tooling. This makes it hard to seamlessly and efficiently combine and integrate different kinds of building blocks such as scripts and configuration definitions that are required. Therefore, in this paper, we present a generic approach to automatically generate API implementations for arbitrary executables such as scripts and compiled programs, which are not natively exposed as APIs. This APIfication enables the uniform invocation of various heterogeneous building blocks, but aims to avoid the costly and manual wrapping of existing executables. In addition, we present the modular and extensible open-source framework Any2API that implements the previously introduced APIfication approach. We evaluate the APIfication approach as well as the Any2API framework by measuring the overhead of generating and using API implementations. Moreover, a detailed case study is conducted to confirm the technical feasibility of the presented approach.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2016-05&engl=0}
}
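INBOOK-2016-05 above describes APIfication, i.e. exposing arbitrary executables through generated APIs. The following hand-written sketch, using only the Python standard library, illustrates the idea for a hypothetical local script ./deploy.sh whose invocation parameters are mapped from a JSON request body to command-line arguments; it is not the wrapper code that Any2API actually generates.

import json
import subprocess
from http.server import BaseHTTPRequestHandler, HTTPServer

# Hypothetical executable to be exposed as a web API; in an APIfication
# framework this path and its parameters would come from a generated spec.
EXECUTABLE = "./deploy.sh"

class GeneratedApiHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        # read the JSON body and map its fields to command-line arguments
        length = int(self.headers.get("Content-Length", 0))
        params = json.loads(self.rfile.read(length) or b"{}")
        args = [EXECUTABLE] + [f"--{key}={value}" for key, value in params.items()]
        proc = subprocess.run(args, capture_output=True, text=True)
        body = json.dumps({
            "exit_code": proc.returncode,
            "stdout": proc.stdout,
            "stderr": proc.stderr,
        }).encode("utf-8")
        self.send_response(200)
        self.send_header("Content-Type", "application/json")
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        self.wfile.write(body)

if __name__ == "__main__":
    HTTPServer(("localhost", 8080), GeneratedApiHandler).serve_forever()

A client could then trigger the executable uniformly over HTTP, e.g. curl -X POST -d '{"target": "test"}' http://localhost:8080/, instead of invoking the script directly on the target machine.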
@inbook {INBOOK-2016-04,
   author = {Uwe Breitenb{\"u}cher and Tobias Binz and Oliver Kopp and K{\'a}lm{\'a}n K{\'e}pes and Frank Leymann and Johannes Wettinger},
   title = {{Hybrid TOSCA Provisioning Plans: Integrating Declarative and Imperative Cloud Application Provisioning Technologies}},
   booktitle = {Cloud Computing and Services Science},
   publisher = {Springer International Publishing},
   series = {Communications in Computer and Information Science},
   volume = {581},
   pages = {239--262},
   type = {Beitrag in Buch},
   month = {Februar},
   year = {2016},
   doi = {10.1007/978-3-319-29582-4_13},
   isbn = {978-3-319-29581-7},
   keywords = {Cloud application provisioning; TOSCA; Hybrid plans; Automation; Declarative modelling; Imperative modelling; Integration},
   language = {Englisch},
   cr-category = {K.6 Management of Computing and Information Systems},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware;     Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {The efficient provisioning of complex applications is one of the most challenging issues in Cloud Computing. Therefore, various provisioning and configuration management technologies have been developed that can be categorized as follows: imperative approaches enable a precise specification of the low-level tasks to be executed whereas declarative approaches focus on describing the desired goals and constraints. Since complex applications employ a plethora of heterogeneous components that must be wired and configured, typically multiple of these technologies have to be integrated to automate the entire provisioning process. In a former work, we presented a workflow modelling concept that enables the seamless integration of imperative and declarative technologies. This paper is an extension of that work to integrate the modelling concept with the Cloud standard TOSCA. In particular, we show how Hybrid Provisioning Plans can be created that retrieve all required information about the desired provisioning directly from the corresponding TOSCA model. We validate the practical feasibility of the concept by extending the OpenTOSCA runtime environment and the workflow language BPEL.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2016-04&engl=0}
}
@inbook {INBOOK-2016-02,
   author = {Santiago G{\'o}mez S{\'a}ez and Vasilios Andrikopoulos and Michael Hahn and Dimka Karastoyanova and Frank Leymann and Marigianna Skouradaki and Karolina Vukojevic-Haupt},
   title = {{Performance and Cost Trade-Off in IaaS Environments: A Scientific Workflow Simulation Environment Case Study}},
   booktitle = {Cloud Computing and Services Science},
   publisher = {Springer},
   series = {Communications in Computer and Information Science},
   volume = {581},
   pages = {153--170},
   type = {Beitrag in Buch},
   month = {Februar},
   year = {2016},
   language = {Englisch},
   cr-category = {C.2.4 Distributed Systems,     D.2.8 Software Engineering Metrics,     D.2.11 Software Engineering Software Architectures},
   contact = {Santiago G{\'o}mez S{\'a}ez: santiago.gomez-saez@iaas.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen},
   abstract = {The adoption of the workflow technology in the eScience domain has contributed to the increase of simulation-based applications orchestrating different services in a flexible and error-free manner. The nature of the provisioning and execution of such simulations makes them potential candidates to be migrated and executed in Cloud environments. The wide availability of Infrastructure-as-a-Service (IaaS) Cloud offerings and service providers has contributed to a rise in the number of supporters of partially or completely migrating and running their scientific experiments in the Cloud. Focusing on Scientific Workflow-based Simulation Environments (SWfSE) applications and their corresponding underlying runtime support, in this research work we aim at empirically analyzing and evaluating the impact of migrating such an environment to multiple IaaS infrastructures. More specifically, we focus on the investigation of multiple Cloud providers and their corresponding optimized and non-optimized IaaS offerings with respect to their offered performance, and its impact on the incurred monetary costs when migrating and executing a SWfSE. The experiments show significant performance improvements and reduced monetary costs when executing the simulation environment in off-premise Clouds.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2016-02&engl=0}
}