@comment{Publications AS: Bibliography (BibTeX). Text outside entries is ignored by BibTeX, but an explicit @comment makes the header intent unambiguous.}
@inproceedings {INPROC-2024-11,
author = {Michael Behringer and Dennis Treder-Tschechlov and Jannis Rapp},
title = {{Empowering Domain Experts to Enhance Clustering Results Through Interactive Refinement}},
booktitle = {Database Systems for Advanced Applications (DASFAA 2024)},
editor = {Onizuka, M. and others},
series = {Lecture Notes in Computer Science},
volume = {14856},
address = {Singapore},
publisher = {Springer},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {518--522},
type = {Conference Paper},
month = {September},
year = {2024},
doi = {10.1007/978-981-97-5575-2_51},
language = {English},
internal-note = {NOTE(review): booktitle was a pasted citation string; editor, series, volume and address were split into dedicated fields. DOI stored bare without the https resolver prefix. language was 'German', but title, abstract, and venue are English -- corrected, please confirm.},
cr-category = {I.5.3 Pattern Recognition Clustering},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Data mining is crucial to gain knowledge from large amounts of data. One
popular data mining technique is clustering aiming to group similar data
together. This technique relies on domain knowledge to interpret the results.
However, the initial results are often insufficient and must be refined -
taking tremendous time and resources with unclear benefits. In this demo paper,
we introduce our novel user-centric approach that supports domain expert in
interactively refining clustering results to their needs by merging and
splitting clusters, specifying constraints, or by applying active learning -
combined in one single tool.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2024-11&engl=1}
}
@inproceedings {INPROC-2024-10,
author = {Dennis Treder-Tschechlov and Manuel Fritz and Holger Schwarz and Bernhard Mitschang},
title = {{Ensemble Clustering based on Meta-Learning and Hyperparameter Optimization}},
booktitle = {Proceedings of the VLDB Endowment (PVLDB)},
volume = {17},
number = {11},
publisher = {VLDB Endowment},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {2880--2892},
type = {Conference Paper},
month = {August},
year = {2024},
doi = {10.14778/3681954.3681970},
language = {English},
internal-note = {NOTE(review): booktitle was a pasted citation string with a Unicode en-dash and duplicated page numbers -- volume/issue moved to dedicated fields. The 'editor' field held a series name, not a person, and was removed. Publisher aligned to VLDB Endowment (as in INPROC-2024-07) -- confirm against the PVLDB imprint. DOI stored bare without the resolver prefix. The same paper also appears as INPROC-2024-09/-08/-07; the duplicates should be consolidated.},
cr-category = {I.5.3 Pattern Recognition Clustering},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Efficient clustering algorithms, such as k-Means, are often used in practice
because they scale well for large datasets. However, they are only able to
detect simple data characteristics. Ensemble clustering can overcome this
limitation by combining multiple results of efficient algorithms. However,
analysts face several challenges when applying ensemble clustering, i. e.,
analysts struggle to (a) efficiently generate an ensemble and (b) combine the
ensemble using a suitable consensus function with a corresponding
hyperparameter setting. In this paper, we propose EffEns, an efficient ensemble
clustering approach to address these challenges. Our approach relies on
meta-learning to learn about dataset characteristics and the correlation
between generated base clusterings and the performance of consensus functions.
We apply the learned knowledge to generate appropriate ensembles and select a
suitable consensus function to combine their results. Further, we use a
state-of-the-art optimization technique to tune the hyperparameters of the
selected consensus function. Our comprehensive evaluation on synthetic and
real-world datasets demonstrates that EffEns significantly outperforms
state-of-the-art approaches w.r.t. accuracy and runtime.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2024-10&engl=1}
}
@inproceedings {INPROC-2024-09,
author = {Dennis Treder-Tschechlov and Manuel Fritz and Holger Schwarz and Bernhard Mitschang},
title = {{Ensemble Clustering based on Meta-Learning and Hyperparameter Optimization}},
booktitle = {Proceedings of the VLDB Endowment (PVLDB)},
volume = {17},
number = {11},
publisher = {VLDB Endowment},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {2880--2892},
type = {Conference Paper},
month = {August},
year = {2024},
doi = {10.14778/3681954.3681970},
language = {English},
internal-note = {NOTE(review): this entry duplicates INPROC-2024-10 (same paper under a different key); keep only one key or alias the others. Booktitle cleaned from a pasted citation string; non-person 'editor' field removed; publisher aligned to VLDB Endowment; DOI stored without resolver prefix.},
cr-category = {I.5.3 Pattern Recognition Clustering},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Efficient clustering algorithms, such as k-Means, are often used in practice
because they scale well for large datasets. However, they are only able to
detect simple data characteristics. Ensemble clustering can overcome this
limitation by combining multiple results of efficient algorithms. However,
analysts face several challenges when applying ensemble clustering, i. e.,
analysts struggle to (a) efficiently generate an ensemble and (b) combine the
ensemble using a suitable consensus function with a corresponding
hyperparameter setting. In this paper, we propose EffEns, an efficient ensemble
clustering approach to address these challenges. Our approach relies on
meta-learning to learn about dataset characteristics and the correlation
between generated base clusterings and the performance of consensus functions.
We apply the learned knowledge to generate appropriate ensembles and select a
suitable consensus function to combine their results. Further, we use a
state-of-the-art optimization technique to tune the hyperparameters of the
selected consensus function. Our comprehensive evaluation on synthetic and
real-world datasets demonstrates that EffEns significantly outperforms
state-of-the-art approaches w.r.t. accuracy and runtime.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2024-09&engl=1}
}
@inproceedings {INPROC-2024-08,
author = {Dennis Treder-Tschechlov and Manuel Fritz and Holger Schwarz and Bernhard Mitschang},
title = {{Ensemble Clustering based on Meta-Learning and Hyperparameter Optimization}},
booktitle = {Proceedings of the VLDB Endowment (PVLDB)},
volume = {17},
number = {11},
publisher = {VLDB Endowment},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {2880--2892},
type = {Conference Paper},
month = {August},
year = {2024},
doi = {10.14778/3681954.3681970},
language = {English},
internal-note = {NOTE(review): this entry duplicates INPROC-2024-10 (same paper under a different key); keep only one key or alias the others. Booktitle cleaned from a pasted citation string; non-person 'editor' field removed; publisher aligned to VLDB Endowment; DOI stored without resolver prefix.},
cr-category = {I.5.3 Pattern Recognition Clustering},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Efficient clustering algorithms, such as k-Means, are often used in practice
because they scale well for large datasets. However, they are only able to
detect simple data characteristics. Ensemble clustering can overcome this
limitation by combining multiple results of efficient algorithms. However,
analysts face several challenges when applying ensemble clustering, i. e.,
analysts struggle to (a) efficiently generate an ensemble and (b) combine the
ensemble using a suitable consensus function with a corresponding
hyperparameter setting. In this paper, we propose EffEns, an efficient ensemble
clustering approach to address these challenges. Our approach relies on
meta-learning to learn about dataset characteristics and the correlation
between generated base clusterings and the performance of consensus functions.
We apply the learned knowledge to generate appropriate ensembles and select a
suitable consensus function to combine their results. Further, we use a
state-of-the-art optimization technique to tune the hyperparameters of the
selected consensus function. Our comprehensive evaluation on synthetic and
real-world datasets demonstrates that EffEns significantly outperforms
state-of-the-art approaches w.r.t. accuracy and runtime.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2024-08&engl=1}
}
@inproceedings {INPROC-2024-07,
author = {Dennis Treder-Tschechlov and Manuel Fritz and Holger Schwarz and Bernhard Mitschang},
title = {{Ensemble Clustering based on Meta-Learning and Hyperparameter Optimization}},
booktitle = {Proceedings of the VLDB Endowment (PVLDB)},
volume = {17},
number = {11},
publisher = {VLDB Endowment},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {2880--2892},
type = {Conference Paper},
month = {August},
year = {2024},
doi = {10.14778/3681954.3681970},
language = {English},
internal-note = {NOTE(review): this entry duplicates INPROC-2024-10 (same paper under a different key); keep only one key or alias the others. Booktitle cleaned from a pasted citation string; non-person 'editor' field removed; DOI added from the sibling copies of this entry (INPROC-2024-10/-09/-08).},
cr-category = {I.5.3 Pattern Recognition Clustering},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Efficient clustering algorithms, such as k-Means, are often used in practice
because they scale well for large datasets. However, they are only able to
detect simple data characteristics. Ensemble clustering can overcome this
limitation by combining multiple results of efficient algorithms. However,
analysts face several challenges when applying ensemble clustering, i. e.,
analysts struggle to (a) efficiently generate an ensemble and (b) combine the
ensemble using a suitable consensus function with a corresponding
hyperparameter setting. In this paper, we propose EffEns, an efficient ensemble
clustering approach to address these challenges. Our approach relies on
meta-learning to learn about dataset characteristics and the correlation
between generated base clusterings and the performance of consensus functions.
We apply the learned knowledge to generate appropriate ensembles and select a
suitable consensus function to combine their results. Further, we use a
state-of-the-art optimization technique to tune the hyperparameters of the
selected consensus function. Our comprehensive evaluation on synthetic and
real-world datasets demonstrates that EffEns significantly outperforms
state-of-the-art approaches w.r.t. accuracy and runtime.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2024-07&engl=1}
}
@inproceedings {INPROC-2024-06,
author = {Christoph Stach and Yunxuan Li and Laura Schuiki and Bernhard Mitschang},
title = {{LALO---A Virtual Data Lake Zone for Composing Tailor-Made Data Products on Demand}},
booktitle = {Proceedings of the 35th International Conference on Database and Expert Systems Applications (DEXA 2024)},
editor = {Christine Strauss and Toshiyuki Amagasa and Giuseppe Manco and Gabriele Kotsis and A Min Tjoa and Ismail Khalil},
address = {Cham},
publisher = {Springer},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Computer Science},
volume = {14911},
pages = {288--305},
type = {Conference Paper},
month = {August},
year = {2024},
isbn = {978-3-031-68311-4},
issn = {0302-9743},
doi = {10.1007/978-3-031-68312-1_22},
keywords = {Data Product; Virtual Data Lake Zone; Data Stream Adaptation},
language = {English},
cr-category = {H.2.7 Database Administration,
E.2 Data Storage Representations,
H.3.3 Information Search and Retrieval,
H.2.8 Database Applications},
contact = {Senden Sie eine E-Mail an \<christoph.stach@ipvs.uni-stuttgart.de\>.},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
internal-note = {NOTE(review): Unicode em-dash in the title replaced with the LaTeX-safe '---', since the rest of this file uses classic 8-bit BibTeX escapes (e.g. {\"o}).},
abstract = {The emerging paradigm of data products, which has become increasingly popular
recently due to the rise of data meshes and data marketplaces, also poses
unprecedented challenges for data management. Current data architectures,
namely data warehouses and data lakes, are not able to meet these challenges
adequately. In particular, these architectures are not designed for a
just-in-time provision of highly customized data products tailored perfectly to
the needs of customers. In this paper, we therefore present a virtual data lake
zone for composing tailor-made data products on demand, called LALO. LALO uses
data streaming technologies to enable just-in-time composing of data products
without allocating storage space in the data architecture permanently. In order
to enable customers to tailor data products to their needs, LALO uses a novel
mechanism that enables live adaptation of data streams. Evaluation results show
that the overhead for such an adaptation is negligible. Therefore, LALO
represents an efficient solution for the appropriate handling of data products,
both in terms of storage space and runtime.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2024-06&engl=1}
}
@inproceedings {INPROC-2024-05,
author = {Jan Schneider and Christoph Gr{\"o}ger and Arnold Lutsch},
title = {{The Data Platform Evolution: From Data Warehouses over Data Lakes to Lakehouses}},
booktitle = {Proceedings of the 34th GI-Workshop on Foundations of Databases (Grundlagen von Datenbanken), Hirsau, Germany},
editor = {Holger Schwarz},
publisher = {CEUR Workshop Proceedings},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {CEUR Workshop Proceedings},
volume = {3714},
pages = {67--71},
type = {Workshop Paper},
month = {July},
year = {2024},
issn = {1613-0073},
keywords = {Lakehouse; Data Warehouse; Data Lake; Data Management; Data Analytics},
language = {English},
cr-category = {H.3.4 Information Storage and Retrieval Systems and Software,
H.4.2 Information Systems Applications Types of Systems},
ee = {https://ceur-ws.org/Vol-3714/invited2.pdf},
internal-note = {NOTE(review): this entry says '34th GI-Workshop' (2024, CEUR Vol-3714) while INPROC-2024-04 cites the '35th GI-Workshop' (CEUR Vol-3710, dated June 2024) -- the ordinal/volume pairing of the two entries looks inconsistent; verify against ceur-ws.org before publishing.},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The continuously increasing availability of data and the growing maturity of
data-driven analysis techniques have encouraged enterprises to collect and
analyze huge amounts of business-relevant data in order to exploit it for
competitive advantages. To facilitate these processes, various platforms for
analytical data management have been developed: While data warehouses have
traditionally been used by business analysts for reporting and OLAP, data lakes
emerged as an alternative concept that also supports advanced analytics. As
these two common types of data platforms show rather contrary characteristics
and target different user groups and analytical approaches, enterprises usually
need to employ both of them, resulting in complex, error-prone and
cost-expensive architectures. To address these issues, efforts have recently
become apparent to combine features of data warehouses and data lakes into
so-called lakehouses, which pursue to serve all kinds of analytics from a
single data platform. This paper provides an overview on the evolution of
analytical data platforms from data warehouses over data lakes to lakehouses
and elaborates on the vision and characteristics of the latter. Furthermore, it
addresses the question of what aspects common data lakes are currently missing
that prevent them from transitioning to lakehouses.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2024-05&engl=1}
}
@inproceedings {INPROC-2024-04,
author = {Jan Schneider and Arnold Lutsch and Christoph Gr{\"o}ger and Holger Schwarz and Bernhard Mitschang},
title = {{First Experiences on the Application of Lakehouses in Industrial Practice}},
booktitle = {Proceedings of the 35th GI-Workshop on Foundations of Databases (Grundlagen von Datenbanken), Herdecke, Germany},
editor = {Uta St{\"o}rl},
publisher = {CEUR Workshop Proceedings},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {CEUR Workshop Proceedings},
volume = {3710},
pages = {3--8},
type = {Workshop Paper},
month = {June},
year = {2024},
issn = {1613-0073},
internal-note = {NOTE(review): 1613-0073 was stored as 'isbn', but it is the CEUR Workshop Proceedings ISSN (cf. the issn field of INPROC-2024-05) -- moved to the issn field.},
keywords = {Data Lakehouse; Data Platform; Platform Architecture; Data Analytics; Case Study; Industry Experience},
language = {English},
cr-category = {H.3.4 Information Storage and Retrieval Systems and Software,
H.4.2 Information Systems Applications Types of Systems},
ee = {https://ceur-ws.org/Vol-3710/paper1.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In recent years, so-called lakehouses have emerged as a new type of data
platform that intends to combine characteristics of data warehouses and data
lakes. Although companies started to employ the associated concepts and
technologies as part of their analytics architectures, little is known about
their practical medium- and long-term experiences as well as proven
architectural decisions. Additionally, there is only limited knowledge about
how lakehouses can be utilized effectively in an industrial context. Hence, it
remains unclear under which circumstances lakehouses represent a viable
alternative to conventional data platforms. To address this gap, we conducted a
case study on a real-world industrial case, in which manufacturing data needs
to be managed and analytically exploited. Within the scope of this case, a
dedicated analytics department has been testing and leveraging a lakehouse
approach for several months in a productive environment with high data volumes
and various types of analytical workloads. The paper at hand presents the
results of our within-case analyses and focuses on the industrial setting of
the case as well as the architecture of the utilized lakehouse. This way, it
provides preliminary insights on the application of lakehouses in industrial
practice and refers to useful architectural decisions.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2024-04&engl=1}
}
@inproceedings {INPROC-2024-03,
author = {Andrea Fieschi and Pascal Hirmer and Sachin Agrawal and Christoph Stach and Bernhard Mitschang},
title = {{HySAAD - A Hybrid Selection Approach for Anonymization by Design in the Automotive Domain}},
booktitle = {Proceedings of the 25th IEEE International Conference on Mobile Data Management (MDM 2024)},
editor = {Chiara Renso and Mahmoud Sakr and Walid G Aref and Ashley Song and Cheng Long},
address = {Los Alamitos, Washington, Tokyo},
publisher = {IEEE Computer Society Conference Publishing Services},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {203--210},
type = {Conference Paper},
month = {June},
year = {2024},
isbn = {979-8-3503-7455-1},
issn = {2375-0324},
doi = {10.1109/MDM61037.2024.00044},
keywords = {anonymization; connected vehicles; privacy protection; metrics},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues},
contact = {Senden Sie eine E-Mail an \<andrea.fieschi@ipvs.uni-stuttgart.de\>.},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
internal-note = {NOTE(review): the quotation marks around 'soft' and 'hard' in the abstract were garbled ({\ss}oft`` / ''hard``, an encoding artifact) -- rewritten as proper LaTeX ``...'' quotes.},
abstract = {The increasing connectivity and data exchange between vehicles and the cloud
have led to growing privacy concerns. To keep on gaining product insights
through data collection while guaranteeing privacy protection, an
anonymization-by-design approach should be used. A rising number of
anonymization methods, not limited to the automotive domain, can be found in
the literature and practice. The developers need support to select the suitable
anonymization technique. To this end, we make the following two contributions:
1) We apply our knowledge from the automotive domain to outline the usage of
qualitative metrics for anonymization techniques assessment; 2) We introduce
HySAAD, a hybrid selection approach for anonymization by design that leverages
this groundwork by recommending appropriate anonymization techniques for each
mobile data analytics use case based on both, qualitative (i.e., ``soft'') metrics
and quantitative (i.e., ``hard'') metrics. Using a real-world use case from the
automotive, we demonstrate the applicability and effectiveness of HySAAD.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2024-03&engl=1}
}
@inproceedings{INPROC-2024-02,
  author      = {Yunxuan Li and Christoph Stach and Bernhard Mitschang},
  title       = {{PaDS: An adaptive and privacy-enabling Data Pipeline for Smart Cars}},
  booktitle   = {Proceedings of the 25th IEEE International Conference on Mobile Data Management (MDM 2024)},
  editor      = {Chiara Renso and Mahmoud Sakr and Walid G Aref and Kyoung-Sook Kim and Manos Papagelis and Dimitris Sacharidis},
  address     = {Los Alamitos, Washington, Tokyo},
  publisher   = {IEEE Computer Society Conference Publishing Services},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages       = {41--50},
  type        = {Conference Paper},
  month       = {June},
  year        = {2024},
  isbn        = {979-8-3503-7455-1},
  issn        = {2375-0324},
  doi         = {10.1109/MDM61037.2024.00026},
  keywords    = {smart car; privacy-enabling data pipeline; datastream runtime adaptation; mobile data privacy management},
  language    = {English},
  cr-category = {K.4.1 Computers and Society Public Policy Issues},
  contact     = {Senden Sie eine E-Mail an \<yunxuan.li@ipvs.uni-stuttgart.de\>.},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {The extensive use of onboard sensors in smart cars enables the collection,
processing, and dissemination of large amounts of mobile data containing
information about the vehicle, its driver, and even bystanders. Despite the
undoubted benefits of such smart cars, this leads to significant privacy
concerns. Due to their inherent mobility, the situation of smart cars changes
frequently, and with it, the appropriate measures to counteract the exposure of
private data. However, data management in such vehicles lacks sufficient
support for this privacy dynamism. We therefore introduce PaDS, a framework for
Privacy adaptive Data Stream. The focus of this paper is to enable adaptive
data processing within the vehicle data stream. With PaDS, Privacy-Enhancing
Technologies can be deployed dynamically in the data pipeline of a smart car
according to the current situation without user intervention. With a comparison
of state-of-the-art approaches, we demonstrate that our solution is very
efficient as it does not require a complete restart of the data pipeline.
Moreover, compared to a static approach, PaDS causes only minimal overhead
despite its dynamic adaptation of the data pipeline to react to changing
privacy requirements. This renders PaDS an effective privacy solution for smart
cars.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2024-02&engl=1}
}
@inproceedings {INPROC-2024-01,
author = {Dennis Przytarski and Christoph Stach and Bernhard Mitschang},
title = {{Assessing Data Layouts to Bring Storage Engine Functionality to Blockchain Technology}},
booktitle = {Proceedings of the 57th Hawaii International Conference on System Sciences (HICSS '24)},
editor = {Tung X. Bui},
publisher = {ScholarSpace},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {5091--5100},
type = {Conference Paper},
month = {January},
year = {2024},
isbn = {978-0-9981331-7-1},
keywords = {blockchain; storage engine; queries},
language = {English},
cr-category = {H.3.1 Content Analysis and Indexing,
H.3.2 Information Storage,
H.3.3 Information Search and Retrieval},
ee = {https://hdl.handle.net/10125/106995},
contact = {Senden Sie eine E-Mail an \<Christoph.Stach@ipvs.uni-stuttgart.de\>.},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Nowadays, modern applications often use blockchains as a secure data store.
However, querying blockchain data is more challenging than querying
conventional databases due to blockchains being primarily designed for the
logging of asset transfers, such as cryptocurrencies, rather than storing and
reading generic data. To improve the experience of querying blockchain data and
make it comparable to querying conventional databases, new design approaches of
the storage engine for blockchain technology are required. An important aspect
is the data layout of a block, as it plays a crucial role in facilitating
reading of blockchain data. In this paper, we identify a suitable data layout
that provides the required query capabilities while preserving the key
properties of blockchain technology. Our goal is to overcome the limitations of
current data access models in blockchains, such as the reliance on auxiliary
data storages and error-prone smart contracts. To this end, we compare four
promising data layouts with data models derived from document, row, column, and
triple stores in terms of schema flexibility, read pattern generality, and
relational algebra suitability. We then assess the most suitable data layout
for blockchain technology.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2024-01&engl=1}
}
@inproceedings {INPROC-2023-07,
author = {Andrea Fieschi and Yunxuan Li and Pascal Hirmer and Christoph Stach and Bernhard Mitschang},
title = {{Privacy in Connected Vehicles: Perspectives of Drivers and Car Manufacturers}},
booktitle = {Service-Oriented Computing: 17th Symposium and Summer School, SummerSOC 2023, Heraklion, Crete, Greece, June 25 -- July 1, 2023, Revised Selected Papers},
editor = {Marco Aiello and Johanna Barzen and Schahram Dustdar and Frank Leymann},
address = {Cham},
publisher = {Springer Nature Switzerland},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Communications in Computer and Information Science},
volume = {1847},
pages = {59--68},
type = {Conference Paper},
month = {October},
year = {2023},
isbn = {978-3-031-45727-2},
doi = {10.1007/978-3-031-45728-9_4},
keywords = {Connected Vehicles; Privacy; Anonymization},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues},
contact = {Senden Sie eine E-Mail an Andrea Fieschi (Andrea.Fieschi@ipvs.uni-stuttgart.de) oder Yunxuan Li (Yunxuan.Li@ipvs.uni-stuttgart.de).},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
internal-note = {NOTE(review): Unicode en-dash in the booktitle date range replaced with the LaTeX-safe '--', matching the 8-bit escapes used elsewhere in this file.},
abstract = {The digital revolution has led to significant technological advancements in the
automotive industry, enabling vehicles to process and share information with
other vehicles and the cloud. However, as data sharing becomes more prevalent,
privacy protection has become an essential issue. In this paper, we explore
various privacy challenges regarding different perspectives of drivers and car
manufacturers. We also propose general approaches to overcome these challenges
with respect to their individual needs. Finally, we highlight the importance of
collaboration between drivers and car manufacturers to establish trust and
achieve better privacy protection.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2023-07&engl=1}
}
@inproceedings {INPROC-2023-06,
author = {Jan Schneider and Christoph Gr{\"o}ger and Arnold Lutsch and Holger Schwarz and Bernhard Mitschang},
title = {{Assessing the Lakehouse: Analysis, Requirements and Definition}},
booktitle = {Proceedings of the 25th International Conference on Enterprise Information Systems, ICEIS 2023, Volume 1, Prague, Czech Republic, April 24-26, 2023},
editor = {Joaquim Filipe and Michal Smialek and Alexander Brodsky and Slimane Hammoudi},
address = {Prague},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {44--56},
type = {Conference Paper},
month = {May},
year = {2023},
isbn = {978-989-758-648-4},
issn = {2184-4992},
doi = {10.5220/0011840500003467},
keywords = {Lakehouse; Data Warehouse; Data Lake; Data Management; Data Analytics},
language = {English},
cr-category = {H.2.4 Database Management Systems,
H.2.7 Database Administration,
H.2.8 Database Applications},
ee = {https://www.scitepress.org/PublicationsDetail.aspx?ID=9ydI3Lyl2Fk=,
https://doi.org/10.5220/0011840500003467},
contact = {jan.schneider@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The digital transformation opens new opportunities for enterprises to optimize
their business processes by applying data-driven analysis techniques. For
storing and organizing the required huge amounts of data, different types of
data platforms have been employed in the past, with data warehouses and data
lakes being the most prominent ones. Since they possess rather contrary
characteristics and address different types of analytics, companies typically
utilize both of them, leading to complex architectures with replicated data and
slow analytical processes. To counter these issues, vendors have recently been
making efforts to break the boundaries and to combine features of both worlds
into integrated data platforms. Such systems are commonly called lakehouses and
promise to simplify enterprise analytics architectures by serving all kinds of
analytical workloads from a single platform. However, it remains unclear how
lakehouses can be characterized, since existing definitions focus almost
arbitrarily on individual architectural or functional aspects and are often
driven by marketing. In this paper, we assess prevalent definitions for
lakehouses and finally propose a new definition, from which several technical
requirements for lakehouses are derived. We apply these requirements to several
popular data management tools, such as Delta Lake, Snowflake and Dremio in
order to evaluate whether they enable the construction of lakehouses.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2023-06&engl=1}
}
@inproceedings{INPROC-2023-05,
  author      = {Thomas Ackermann and Robert Miehe and Peter Reimann and Bernhard Mitschang and Ralf Takors and Thomas Bauernhansl},
  title       = {{A Cross-Disciplinary Training Concept for Future Technologists in the Dawn of Biointelligent Production Systems}},
  booktitle   = {Procedia CIRP: Proceedings of 13th CIRP Conference on Learning Factories (CIRP CLF)},
  publisher   = {Elsevier BV},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  type        = {Conference Paper},
  month       = {May},
  year        = {2023},
  keywords    = {Biointelligent systems; Biological transformation; Converging technologies; Qualification},
  language    = {English},
  cr-category = {H.2.8 Database Applications},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Biologicalization is considered one of the most important transformation
processes in industry alongside digitalization. This work presents a
qualification concept within the Stuttgart Biointelligent Manufacturing
Framework (BioMEFUS), which is intended to provide skills and experiences at
the intersections between manufacturing and process engineering, computer
science and life science. Life cycle management, production methods and
engineering of components towards the development and implementation of
biointelligent systems are considered as the major engineering platforms of the
framework. The qualification concept is developed for early stage researchers
(ESRs) at the doctorate stage. It provides a mapping of individual research
projects in the field of biointelligent production systems and contains
subject-related and methodological building blocks for the formation of future
experts and decision-makers in the course of biological transformation.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2023-05&engl=1}
}
@inproceedings {INPROC-2023-04,
author = {Julius Voggesberger and Peter Reimann and Bernhard Mitschang},
title = {{Towards the Automatic Creation of Optimized Classifier Ensembles}},
booktitle = {Proceedings of the 25th International Conference on Enterprise Information Systems (ICEIS 2023)},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {614--621},
type = {Conference Paper},
month = {April},
year = {2023},
keywords = {Classifier Ensembles; Classifier Diversity; Decision Fusion; AutoML; Machine Learning},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Classifier ensemble algorithms allow for the creation of combined machine
learning models that are more accurate and generalizable than individual
classifiers. However, creating such an ensemble is complex, as several
requirements must be fulfilled. An expert has to select multiple classifiers
that are both accurate and diverse. In addition, a decision fusion algorithm
must be selected to combine the predictions of these classifiers into a
consensus decision. Satisfying these requirements is challenging even for
experts, as it requires a lot of time and knowledge. In this position paper, we
propose to automate the creation of classifier ensembles. While there already
exist several frameworks that automatically create multiple classifiers, none
of them meet all requirements to build optimized ensembles based on these
individual classifiers. Hence, we introduce and compare three basic approaches
that tackle this challenge. Based on the comparison results, we propose one of
the approaches that best meets the requirements to lay the foundation for
future work.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2023-04&engl=1}
}
@inproceedings {INPROC-2023-03,
author = {Yannick Wilhelm and Peter Reimann and Wolfgang Gauchel and Steffen Klein and Bernhard Mitschang},
title = {{PUSION - A Generic and Automated Framework for Decision Fusion}},
booktitle = {Proceedings of the 39th IEEE International Conference on Data Engineering (ICDE 2023)},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {April},
year = {2023},
keywords = {Classifier ensembles; decision fusion; automated decision fusion; hybrid fault diagnosis},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Combining two or more classifiers into an ensemble and fusing the individual
classifier decisions to a consensus decision can improve the accuracy for a
classification problem. The classification improvement of the fusion result
depends on numerous factors, such as the data set, the combination scenario,
the decision fusion algorithm, as well as the prediction accuracies and
diversity of the multiple classifiers to be combined. Due to these factors, the
best decision fusion algorithm for a given decision fusion problem cannot be
generally determined in advance. In order to support the user in combining
classifiers and to achieve the best possible fusion result, we propose the
PUSION (Python Universal fuSION) framework, a novel generic and automated
framework for decision fusion of classifiers. The framework includes 14
decision fusion algorithms and covers a total of eight different combination
scenarios for both multi-class and multi-label classification problems. The
introduced concept of AutoFusion detects the combination scenario for a given
use case, automatically selects the applicable decision fusion algorithms and
returns the decision fusion algorithm that leads to the best fusion result. The
framework is evaluated with two real-world case studies in the field of fault
diagnosis. In both case studies, the consensus decision of multiple classifiers
and heterogeneous fault diagnosis methods significantly increased the overall
classification accuracy. Our evaluation results show that our framework is of
practical relevance and reliably finds the best performing decision fusion
algorithm for a given combination task.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2023-03&engl=1}
}
@inproceedings {INPROC-2023-02,
author = {Dennis Treder-Tschechlov and Peter Reimann and Holger Schwarz and Bernhard Mitschang},
title = {{Approach to Synthetic Data Generation for Imbalanced Multi-class Problems with Heterogeneous Groups}},
booktitle = {Tagungsband der 20. Fachtagung Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2023)},
publisher = {GI Gesellschaft f{\"u}r Informatik e.V. (GI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Informatics (LNI)},
pages = {329--351},
type = {Conference Paper},
month = {March},
year = {2023},
keywords = {Machine learning; classification; data generation; real-world data characteristics},
language = {English},
cr-category = {H.2.8 Database Applications},
ee = {https://dl.gi.de/bitstream/handle/20.500.12116/40320/B3-5.pdf?},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {To benchmark novel classification algorithms, these algorithms should be
evaluated on data with characteristics that also appear in real-world use
cases. Important data characteristics that often lead to challenges for
classification approaches are multi-class imbalance and heterogeneous groups.
Heterogeneous groups are sets of real-world entities, where the classification
patterns may vary among different groups and where the groups are typically
imbalanced in the data. Real-world data that comprise these characteristics are
usually not publicly available, e.g., because they constitute sensitive patient
information or due to privacy concerns. Further, the manifestations of the
characteristics cannot be controlled specifically on real-world data. A more
rigorous approach is to synthetically generate data such that different
manifestations of the characteristics can be controlled as well. However,
existing data generators are not able to generate data that feature both data
characteristics, i.e., multi-class imbalance and heterogeneous groups. In this
paper, we propose an approach that fills this gap as it allows to synthetically
generate data that exhibit both characteristics. We make use of a taxonomy
model that organizes real-world entities in domain-specific heterogeneous
groups to generate data reflecting the characteristics of these groups.
Further, we incorporate probability distributions to reflect the imbalances of
multiple classes and groups from real-world use cases. The evaluation shows
that our approach can generate data that feature the data characteristics
multi-class imbalance and heterogeneous groups and that it allows to control
different manifestations of these characteristics.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2023-02&engl=1}
}
@inproceedings {INPROC-2023-01,
author = {Yunxuan Li and Pascal Hirmer and Christoph Stach},
title = {{CV-Priv: Towards a Context Model for Privacy Policy Creation for Connected Vehicles}},
booktitle = {Proceedings of the 21st International Conference on Pervasive Computing and Communications Workshops},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--6},
type = {Conference Paper},
month = {March},
year = {2023},
keywords = {Context Modeling; Ontology; Privacy Policy; Privacy-Preserving; Connected Vehicle},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
K.6.5 Security and Protection},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Connected vehicles are becoming progressively capable of collecting,
processing, and sharing data, which leads to a growing concern about privacy in
the automotive domain. However, research has shown that although users are
highly concerned about their privacy, they usually find it difficult to
configure privacy settings. This is because the privacy context, which
represents the privacy circumstance a driver faces during the privacy policy
creation, is highly complex. To create custom privacy policies, drivers must
consider the privacy context information, such as what service is requesting
data from which vehicle sensor, or what privacy countermeasures are available
for vehicles and satisfy certain privacy properties. This easily leads to
information and choice overhead. Therefore, we propose the novel ontology-based
privacy context model, CV-Priv, for the modeling of such privacy context
information for creating custom privacy policies in the automotive domain. In
this paper, we analyze the design requirements for a privacy context model
based on challenges drivers might face during the privacy policy creation
phase. We also demonstrate how CV-Priv can be utilized by context-aware systems
to help drivers transform their fuzzy privacy requirements into sound privacy
policies.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2023-01&engl=1}
}
@inproceedings {INPROC-2022-09,
author = {Jan Schneider and Pascal Hirmer},
title = {{Enhancing IoT Platforms for Autonomous Device Discovery and Selection}},
booktitle = {Service-Oriented Computing},
editor = {Johanna Barzen and Frank Leymann and Schahram Dustdar},
publisher = {Springer International Publishing},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Communications in Computer and Information Science},
volume = {1603},
pages = {24--44},
type = {Conference Paper},
month = {October},
year = {2022},
isbn = {978-3-031-18304-1},
keywords = {Internet of Things; IoT platforms; Device discovery},
language = {English},
cr-category = {C.2.1 Network Architecture and Design,
C.2.4 Distributed Systems},
ee = {https://doi.org/10.1007/978-3-031-18304-1_2},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The Internet of Things (IoT) encompasses a variety of technologies that enable
the formation of adaptive and flexible networks from heterogeneous devices.
Along with the rising number of applications, the amount of devices within IoT
ecosystems is constantly increasing. In order to cope with this inherent
complexity and to enable efficient administration and orchestration of devices,
IoT platforms have emerged in recent years. While many IoT platforms empower
users to define application logic for use cases and execute it within an
ecosystem, they typically rely on static device references, leading to huge
manual maintenance efforts and low robustness. In this paper, we present an
approach that allows IoT platforms to autonomously and reliably execute
pre-defined use cases by automatically discovering and selecting the most
suitable devices. It establishes loose coupling and hence does not impose major
technical constraints on the ecosystems in which it is operated.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2022-09&engl=1}
}
@inproceedings {INPROC-2022-08,
author = {Rebecca Eichler and Christoph Gr{\"o}ger and Eva Hoos and Christoph Stach and Holger Schwarz and Bernhard Mitschang},
title = {{Establishing the Enterprise Data Marketplace: Characteristics, Architecture, and Challenges}},
booktitle = {Proceedings of the Workshop on Data Science for Data Marketplaces in Conjunction with the 48th International Conference on Very Large Data Bases},
editor = {Xiaohui Yu and Jian Pei},
publisher = {-},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--12},
type = {Workshop Paper},
month = {September},
year = {2022},
language = {English},
cr-category = {E.m Data Miscellaneous,
H.3.7 Digital Libraries,
H.4.m Information Systems Applications Miscellaneous},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Companies today have increasing amounts of data at their disposal, most of
which is not used, leaving the data value unexploited. In order to leverage the
data value, the data must be democratized, i.e., made available to the company
employees. In this context, the use of enterprise data marketplaces, platforms
for trading data within a company, are proposed. However, specifics of
enterprise data marketplaces and how these can be implemented have not been
investigated in literature so far. To shed light on these topics, we illustrate
the characteristics of an enterprise data marketplace and highlight according
marketplace requirements. We provide an enterprise data marketplace
architecture, discuss how it integrates into a company's system landscape and
present an enterprise data marketplace prototype. Finally, we examine
organizational and technical challenges which arise when operating a
marketplace in the enterprise context. In this paper, we thereby present the
enterprise data marketplace as a distinct marketplace type and provide the
basis for establishing it within a company.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2022-08&engl=1}
}
@inproceedings {INPROC-2022-07,
author = {Yunxuan Li and Pascal Hirmer and Christoph Stach and Bernhard Mitschang},
title = {{Ensuring Situation-Aware Privacy for Connected Vehicles}},
booktitle = {Proceedings of the 12th International Conference on the Internet of Things (IoT); Delft, Netherlands, November 7 - 10, 2022},
editor = {Evangelos Niforatos and Gerd Kortuem and Nirvana Meratnia and Josh Siegel and Florian Michahelles},
address = {New York, NY, USA},
publisher = {ACM},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {135--138},
type = {Conference Paper},
month = {November},
year = {2022},
isbn = {978-1-4503-9665-3},
doi = {10.1145/3567445.3569163},
keywords = {Connected Vehicle; Situation-Awareness; Privacy-Preserving},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
K.6.5 Security and Protection},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {As technology advances in new sensors and software, modern vehicles become
increasingly intelligent. To date, connected vehicles can collect, process, and
share data with other entities in connected vehicle environments. However, in
terms of data collection and exchange, privacy becomes a central issue. It is
challenging to preserve privacy in connected vehicle environments when the
privacy demands of drivers could change from situation to situation even for
the same service. In this paper, we analyze the requirements for a
privacy-preserving system in connected vehicle environments with a focus on
situation-awareness and safety aspects. Based on the analysis, we propose a
novel situation-aware privacy-preserving framework for connected vehicles. Our
framework supports individual privacy protections for specific end-point
services and situation-aware privacy protections for different circumstances.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2022-07&engl=1}
}
@inproceedings {INPROC-2022-06,
author = {Julian Ziegler and Peter Reimann and Christoph Schulz and Florian Keller and Bernhard Mitschang},
title = {{A Graph Structure to Discover Patterns in Unstructured Processes of Product Development}},
booktitle = {Proceedings of the 23rd International Conference on Information Reuse and Integration for Data Science (IRI 2022)},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {August},
year = {2022},
keywords = {Process Discovery; Unstructured Processes; Process Patterns; Graph Data; Frequent Subgraph Mining},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {A well-known information reuse approach is to exploit event logs for process
discovery and optimization. However, process discovery is rarely used for
product development projects. This is because information systems in product
development, e. g., Product-Lifecycle-Management (PLM) systems, do not provide
the event logs required by process discovery algorithms. Additionally, existing
algorithms struggle with development projects, as these are unstructured and
rich in variety. In this paper, we propose a novel approach to process
discovery in order to make it applicable and tailored to product development
projects. Instead of using flat event logs, we provide a graph-based data
structure that is able to represent both activities and data of product
development projects with the dataflow between activities. Based on this
structure, we can leverage provenance available in PLM systems. Furthermore, we
may use frequent subgraph mining to discover process patterns. Such patterns
are well suited to describe different variants and common sub-processes of
unstructured processes. Using a prototype, we evaluate this approach and
successfully discover prevailing patterns. These patterns may be used by
engineers to support their decision-making or help improve the execution of
development projects.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2022-06&engl=1}
}
@inproceedings {INPROC-2022-05,
author = {Rebecca Eichler and Christoph Gr{\"o}ger and Eva Hoos and Holger Schwarz and Bernhard Mitschang},
title = {{Data Shopping -- How an Enterprise Data Marketplace Supports Data Democratization in Companies}},
booktitle = {Proceedings of the 34th International Conference on Intelligent Information Systems},
editor = {Jochen De Weerdt and Artem Polyvyanyy},
address = {Stuttgart},
publisher = {Springer International Publishing},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Business Information Processing},
pages = {19--26},
type = {Conference Paper},
month = {May},
year = {2022},
doi = {10.1007/978-3-031-07481-3_3},
keywords = {Data Marketplace; Data Sharing; Data Democratization},
language = {English},
cr-category = {H.0 Information Systems General},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {To exploit the company's data value, employees must be able to find, understand
and access it. The process of making corporate data available to the majority
of the company's employees is referred to as data democratization. In this
work, we present the current state and challenges of data democratization in
companies, derived from a comprehensive literature study and expert interviews
we conducted with a manufacturer. In this context a data consumer's journey is
presented that reflects the required steps, tool types and roles for finding,
understanding and accessing data in addition to revealing three data
democratization challenges. To address these challenges we propose the use of
an enterprise data marketplace, a novel type of information system for sharing
data within the company. We developed a prototype based on which a suitability
assessment of a data marketplace yields an improved consumer journey and
demonstrates that the marketplace addresses the data democratization challenges
and consequently, shows that the marketplace is suited for realizing data
democratization.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2022-05&engl=1}
}
@inproceedings {INPROC-2022-03,
author = {Marco Spie{\ss} and Peter Reimann and Christian Weber and Bernhard Mitschang},
title = {{Analysis of Incremental Learning and Windowing to handle Combined Dataset Shifts on Binary Classification for Product Failure Prediction}},
booktitle = {Proceedings of the 24th International Conference on Enterprise Information Systems (ICEIS 2022)},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {April},
year = {2022},
keywords = {Binary Classification; Dataset Shift; Incremental Learning; Product Failure Prediction; Windowing},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Dataset Shifts (DSS) are known to cause poor predictive performance in
supervised machine learning tasks. We present a challenging binary
classification task for a real-world use case of product failure prediction.
The target is to predict whether a product, e. g., a truck may fail during the
warranty period. However, building a satisfactory classifier is difficult,
because the characteristics of underlying training data entail two kinds of
DSS. First, the distribution of product configurations may change over time,
leading to a covariate shift. Second, products gradually fail at different
points in time, so that the labels in training data may change, which may cause a
concept shift. Further, both DSS show a trade-off relationship, i. e.,
addressing one of them may imply negative impacts on the other one. We discuss
the results of an experimental study to investigate how different approaches to
addressing DSS perform when they are faced with both a covariate and a concept
shift. Thereby, we prove that existing approaches, e. g., incremental learning
and windowing, especially suffer from the trade-off between both DSS.
Nevertheless, we come up with a solution for a data-driven classifier that
yields better results than a baseline solution that does not address DSS.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2022-03&engl=1}
}
@inproceedings {INPROC-2022-02,
author = {Florian Hermann and Bowen Chen and Golsa Ghasemi and Valentin Stegmaier and Thomas Ackermann and Peter Reimann and Sabrina Vogt and Thomas Graf and Michael Weyrich},
title = {{A Digital Twin Approach for the Prediction of the Geometry of Single Tracks Produced by Laser Metal Deposition}},
booktitle = {Procedia CIRP: Proceedings of the 55th CIRP Conference on Manufacturing Systems (CIRP CMS 2022)},
publisher = {Elsevier BV},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {June},
year = {2022},
keywords = {Laser metal deposition; Software-defined manufacturing; Digital Twin; Asset Administration Shell},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Flexible manufacturing processes such as laser metal deposition have a high
potential for a production solely defined by software to cope with the current
challenges of production systems. The determination of suitable machine
parameters for the production of novel materials and geometries however
requires extensive experimental effort. Existing simulative approaches do not
offer sufficient accuracy to predict the relevant machine parameters in a
satisfactory way. This paper presents a new concept, in which we apply a
digital twin to provide a step towards a fully software-defined and predictable
laser metal deposition process. The presented concept includes relevant data of
the machines as well as data-driven machine learning models and physics-based
simulation models. This enables a more reliable prediction of geometries of
single tracks which was validated on a laser metal deposition machine.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2022-02&engl=1}
}
@inproceedings {INPROC-2022-01,
author = {Christoph Stach and Cl{\'e}mentine Gritti and Dennis Przytarski and Bernhard Mitschang},
title = {{Can Blockchains and Data Privacy Laws be Reconciled? A Fundamental Study of How Privacy-Aware Blockchains are Feasible}},
booktitle = {Proceedings of the 37th ACM/SIGAPP Symposium On Applied Computing},
editor = {Jiman Hong and Miroslav Bures and Ronald Petrlic and Christoph Sorge},
address = {Brno},
publisher = {ACM},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--10},
type = {Conference Paper},
month = {April},
year = {2022},
isbn = {978-1-4503-8713-2},
doi = {10.1145/3477314.3506986},
keywords = {blockchains; immutable; tamper-proof; GDPR; privacy assessment},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
K.6.5 Security and Protection},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de.},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Due to the advancing digitalization, the importance of data is constantly
increasing. Application domains such as smart cars, smart cities, or smart
healthcare rely on the permanent availability of large amounts of data to all
parties involved. As a result, the value of data increases, making it a
lucrative target for cyber-attacks. Particularly when human lives depend on the
data, additional protection measures are therefore important for data
management and provision. Blockchains, i.e., decentralized, immutable, and
tamper-proof data stores, are becoming increasingly popular for this purpose.
Yet, from a data protection perspective, the immutable and tamper-proof
properties of blockchains pose a privacy concern. In this paper, we therefore
investigate whether blockchains are in compliance with the General Data
Protection Regulation (GDPR) if personal data are involved. To this end, we
elaborate which articles of the GDPR are relevant in this regard and present
technical solutions for those legal requirements with which blockchains are in
conflict. We further identify open research questions that need to be addressed
in order to achieve a privacy-by-design blockchain system.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2022-01&engl=1}
}
@inproceedings {INPROC-2021-11,
author = {Christoph Stach and Julia Br{\"a}cker and Rebecca Eichler and Corinna Giebler and Bernhard Mitschang},
title = {{Demand-Driven Data Provisioning in Data Lakes: BARENTS - A Tailorable Data Preparation Zone}},
booktitle = {Proceedings of the 23rd International Conference on Information Integration and Web-based Applications \& Services (iiWAS2021); Linz, Austria, November 29-December 1, 2021},
editor = {Maria Indrawan-Santiago and Eric Pardede and Ivan Luiz Salvadori and Matthias Steinbauer and Ismail Khalil and Gabriele Kotsis},
address = {New York, NY, United States},
publisher = {Association for Computing Machinery (ACM)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--12},
type = {Conference Paper},
month = {November},
year = {2021},
isbn = {978-1-4503-9556-4/21/11},
doi = {10.1145/3487664.3487784},
keywords = {data pre-processing; data transformation; knowledge modeling; ontology; data management; Data Lakes; zone model; food analysis},
language = {English},
cr-category = {H.2.7 Database Administration,
E.2 Data Storage Representations,
H.3.3 Information Search and Retrieval,
H.2.8 Database Applications},
contact = {Senden Sie eine E-Mail an christoph.stach@ipvs.uni-stuttgart.de.},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Data has never been as significant as it is today. It can be acquired virtually
at will on any subject. Yet, this poses new challenges towards data management,
especially in terms of storage (data is not consumed during processing, i.e.,
the data volume keeps growing), flexibility (new applications emerge), and
operability (analysts are no IT experts). The goal has to be a demand-driven
data provisioning, i.e., the right data must be available in the right form at
the right time. Therefore, we introduce a tailorable data preparation zone for
Data Lakes called BARENTS. It enables users to model in an ontology how to
derive information from data and assign the information to use cases. The data
is automatically processed based on this model and the refined data is made
available to the appropriate use cases. Here, we focus on a resource-efficient
data management strategy. BARENTS can be embedded seamlessly into established
Big Data infrastructures, e.g., Data Lakes.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2021-11&engl=1}
}
@inproceedings {INPROC-2021-10,
author = {Alejandro Villanueva Zacarias and Christian Weber and Peter Reimann and Bernhard Mitschang},
title = {{AssistML: A Concept to Recommend ML Solutions for Predictive Use Cases}},
booktitle = {Proceedings of the 8th IEEE International Conference on Data Science and Advanced Analytics (DSAA 2021)},
address = {Porto, Portugal},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {October},
year = {2021},
keywords = {Recommender Systems; Machine Learning; Meta Learning},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The adoption of machine learning (ML) in organizations is characterized by the
use of multiple ML software components. Citizen data scientists face practical
requirements when building ML systems, which go beyond the known challenges of
ML, e. g., data engineering or parameter optimization. They are expected to
quickly identify ML system options that strike a suitable trade-off across
multiple performance criteria. These options also need to be understandable for
non-technical users. Addressing these practical requirements represents a
problem for citizen data scientists with limited ML experience. This calls for
a method to help them identify suitable ML software combinations. Related work,
e. g., AutoML systems, are not responsive enough or cannot balance different
performance criteria. In this paper, we introduce AssistML, a novel concept to
recommend ML solutions, i. e., software systems with ML models, for predictive
use cases. AssistML uses metadata of existing ML solutions to quickly identify
and explain options for a new use case. We implement the approach and evaluate
it with two exemplary use cases. Results show that AssistML proposes ML
solutions that are in line with users' performance preferences in seconds.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2021-10&engl=1}
}
@inproceedings {INPROC-2021-09,
author = {Eduard Wagner and Bernd Keller and Peter Reimann and Christoph Gr{\"o}ger and Dieter Spath},
title = {{Advanced Analytics for Evaluating Critical Joining Technologies in Automotive Body Structures and Body Shops}},
booktitle = {Proceedings of the 15th CIRP Conference on Intelligent Computation in Manufacturing Engineering (CIRP ICME)},
publisher = {Elsevier},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {July},
year = {2021},
keywords = {Body Shop; Data Analytics; Data Mining; Advanced Analytics; Machine Learning},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The product development process within the automotive industry is subject to
changing demands due to internal and external influences. These influences and
adjustments especially affect the car body and its inherent joining technology,
as critical stages of variant creation. However, current literature does not
offer a suitable analytical method to identify and assess these critical
influences. We propose an advanced analytics approach that combines data mining
and machine learning techniques within the car body substructure. The
evaluation within the Mercedes-Benz AG shows that our approach facilitates a
quantitative assessment of unknown interdependencies between car body modules
and corresponding joining techniques.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2021-09&engl=1}
}
@inproceedings {INPROC-2021-08,
author = {Alexander Birk and Yannick Wilhelm and Simon Dreher and Christian Flack and Peter Reimann and Christoph Gr{\"o}ger},
title = {{A Real-World Application of Process Mining for Data-Driven Analysis of Multi-Level Interlinked Manufacturing Processes}},
booktitle = {Procedia CIRP: Proceedings of the 54th CIRP Conference on Manufacturing Systems (CIRP CMS 2021)},
publisher = {Elsevier},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {September},
year = {2021},
keywords = {Process Mining; Multi-level Interlinked Manufacturing Process; Heterogeneous Data Sources; Data Integration},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Process Mining (PM) has huge potential for manufacturing process analysis.
However, there is little research on practical applications. We investigate a
real-world manufacturing process of pneumatic valves. The manufacturing process
comprises interlinked events at the superordinate business process level and at
the subordinate machine level, making its analysis based on PM challenging. We
show how to integrate heterogeneous data sources and give examples how PM
enables a deeper understanding of the manufacturing process, thereby helping to
uncover optimization potentials. Furthermore, we discuss challenges in data
integration and point out limitations of current PM techniques in
manufacturing.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2021-08&engl=1}
}
@inproceedings {INPROC-2021-07,
author = {Julian Ziegler and Peter Reimann and Florian Keller and Bernhard Mitschang},
title = {{A Metadata Model to Connect Isolated Data Silos and Activities of the CAE Domain}},
booktitle = {Proceedings of the 33rd International Conference on Advanced Information Systems Engineering (CAiSE)},
publisher = {Springer International Publishing},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {213--228},
type = {Conference Paper},
month = {June},
year = {2021},
keywords = {Metadata Models; Graphs; Computer-aided Engineering},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Computer-aided engineering (CAE) applications support the digital
transformation of the manufacturing industry. They facilitate virtual product
development and product testing via computer simulations. CAE applications
generate vast quantities of heterogeneous data. Domain experts struggle to
access and analyze them, because such engineering data are not sufficiently
described with metadata. In this paper, we characterize the CAE domain and
identify unsolved challenges for a tailored data and metadata management. For
instance, work activities in product development projects and their
relationships to data are not represented explicitly in current metadata
models. We propose a metadata model that addresses all challenges and provides
a connected view on all CAE data, metadata, and work activities of development
projects. We validate the feasibility of our metadata model through a
prototypical implementation and its application to a real-world use case. This
verifies that our metadata model addresses the CAE-specific challenges and this
way eases the task of domain experts to exploit relevant data.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2021-07&engl=1}
}
@inproceedings {INPROC-2021-06,
author = {Rebecca Eichler and Corinna Giebler and Christoph Gr{\"o}ger and Eva Hoos and Holger Schwarz and Bernhard Mitschang},
title = {{Enterprise-Wide Metadata Management - An Industry Case on the Current State and Challenges}},
booktitle = {24th International Conference on Business Information Systems},
editor = {Witold Abramowicz and S{\"o}ren Auer and El{\.z}bieta Lewa{\'n}ska},
publisher = {TIB Open Publishing},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {269--279},
type = {Conference Paper},
month = {July},
year = {2021},
doi = {10.52825/bis.v1i.47},
language = {English},
cr-category = {A.0 General Literature, General},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Abstract. Metadata management is a crucial success factor for companies today,
as for example, it enables exploiting data value fully or enables legal
compliance. With the emergence of new concepts, such as the data lake, and new
objectives, such as the enterprise-wide sharing of data, metadata management
has evolved and now poses a renewed challenge for companies. In this context,
we interviewed a globally active manufacturer to reveal how metadata management
is implemented in practice today and what challenges companies are faced with
and whether these constitute research gaps. As an outcome, we present the
company's metadata management goals and their corresponding solution
approaches and challenges. An evaluation of the challenges through a literature
and tool review yields three research gaps, which are concerned with the
topics: (1) metadata management for data lakes, (2) categorizations and
compositions of metadata management tools for comprehensive metadata
management, and (3) the use of data marketplaces as metadata-driven exchange
platforms within an enterprise. The gaps lay the groundwork for further
research activities in the field of metadata management and the industry case
represents a starting point for research to realign with real-world industry
needs.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2021-06&engl=1}
}
@inproceedings {INPROC-2021-05,
author = {Corinna Giebler and Christoph Gr{\"o}ger and Eva Hoos and Rebecca Eichler and Holger Schwarz and Bernhard Mitschang},
title = {{The Data Lake Architecture Framework}},
booktitle = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2021), 19. Fachtagung des GI-Fachbereichs Datenbanken und Informationssysteme (DBIS), 13.-17. September 2021, Dresden, Germany},
publisher = {Gesellschaft f{\"u}r Informatik},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {351--370},
type = {Conference Paper},
month = {September},
year = {2021},
doi = {10.18420/btw2021-19},
language = {English},
cr-category = {H.4 Information Systems Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {During recent years, data lakes emerged as a way to manage large amounts of
heterogeneous data for modern data analytics. Although various work on
individual aspects of data lakes exists, there is no comprehensive data lake
architecture yet. Concepts that describe themselves as a ``data lake
architecture'' are only partial. In this work, we introduce the data lake
architecture framework. It supports the definition of data lake architectures
by defining nine architectural aspects, i.e., perspectives on a data lake, such
as data storage or data modeling, and by exploring the interdependencies
between these aspects. The included methodology helps to choose appropriate
concepts to instantiate each aspect. To evaluate the framework, we use it to
configure an exemplary data lake architecture for a real-world data lake
implementation. This final assessment shows that our framework provides
comprehensive guidance in the configuration of a data lake architecture.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2021-05&engl=1}
}
@inproceedings {INPROC-2021-04,
author = {Manuel Fritz and Gang Shao and Holger Schwarz},
title = {{Automatic Selection of Analytic Platforms with ASAP-DM}},
booktitle = {Proceedings of the 33rd International Conference on Scientific and Statistical Database Management},
publisher = {ACM},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {220--225},
type = {Conference Paper},
month = {July},
year = {2021},
isbn = {9781450384131},
doi = {10.1145/3468791.3468802},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The plethora of available analytic platforms escalates the difficulty of
selecting the most appropriate platform for a certain data mining task and
datasets with varying characteristics. Especially novice analysts experience
difficulties to keep up with the latest technical developments. In this demo,
we present the ASAP-DM framework. ASAP-DM is able to automatically select a
well-performing analytic platform for a given data mining task via an intuitive
web interface, thus especially supporting novice analysts. The take-aways for
demo attendees are: (1) a good understanding of the challenges of various data
mining workloads, dataset characteristics, and the effects on the selection of
analytic platforms, (2) useful insights on how ASAP-DM internally works, and
(3) how to benefit from ASAP-DM for exploratory data analysis.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2021-04&engl=1}
}
@inproceedings {INPROC-2021-03,
author = {Dennis Tschechlov and Manuel Fritz and Holger Schwarz},
title = {{AutoML4Clust: Efficient AutoML for Clustering Analyses}},
booktitle = {Proceedings of the 24th International Conference on Extending Database Technology (EDBT)},
publisher = {Online},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--6},
type = {Conference Paper},
month = {March},
year = {2021},
doi = {10.5441/002/EDBT.2021.32},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Data analysis is a highly iterative process. In order to achieve valuable
analysis results, analysts typically execute many configurations, i.e.,
algorithms and their hyperparameter settings, based on their domain knowledge.
While experienced analysts may be able to define small search spaces for
promising configurations, especially novice analysts define large search spaces
due to their lack of domain knowledge. In the worst case, they perform an
exhaustive search throughout the whole search space, resulting in infeasible
runtimes. Recent advances in the research area of AutoML address this challenge
by supporting novice analysts in the combined algorithm selection and
hyperparameter optimization (CASH) problem for supervised learning tasks.
However, no such systems exist for unsupervised learning tasks, such as the
prevalent task of clustering analysis. In this work, we present our novel
AutoML4Clust approach, which efficiently supports novice analysts regarding
CASH for clustering analyses. To the best of our knowledge, this is the first
thoroughly elaborated approach in this area. Our comprehensive evaluation
unveils that AutoML4Clust significantly outperforms several existing
approaches, as it achieves considerable speedups for the CASH problem, while
still achieving very valuable clustering results.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2021-03&engl=1}
}
@inproceedings {INPROC-2021-02,
author = {Manuel Fritz and Dennis Tschechlov and Holger Schwarz},
title = {{Efficient Exploratory Clustering Analyses with Qualitative Approximations}},
booktitle = {Proceedings of the 24th International Conference on Extending Database Technology (EDBT)},
publisher = {Online},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--6},
type = {Conference Paper},
month = {March},
year = {2021},
doi = {10.5441/002/EDBT.2021.31},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Clustering is a fundamental primitive for exploratory data analyses. Yet,
finding valuable clustering results for previously unseen datasets is a pivotal
challenge. Analysts as well as automated exploration methods often perform an
exploratory clustering analysis, i.e., they repeatedly execute a clustering
algorithm with varying parameters until valuable results can be found. k-center
clustering algorithms, such as k-Means, are commonly used in such exploratory
processes. However, in the worst case, each single execution of k-Means
requires a super-polynomial runtime, making the overall exploratory process on
voluminous datasets infeasible in a reasonable time frame. We propose a novel
and efficient approach for approximating results of k-center clustering
algorithms, thus supporting analysts in an ad-hoc exploratory process for
valuable clustering results. Our evaluation on an Apache Spark cluster unveils
that our approach significantly outperforms the regular execution of a k-center
clustering algorithm by several orders of magnitude in runtime with a
predefinable qualitative demand. Hence, our approach is a strong fit for
clustering voluminous datasets in exploratory settings.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2021-02&engl=1}
}
@inproceedings {INPROC-2020-57,
author = {Simon Dreher and Peter Reimann and Christoph Gr{\"o}ger},
title = {{Application Fields and Research Gaps of Process Mining in Manufacturing Companies}},
booktitle = {Proceedings of INFORMATIK 2020},
editor = {R. H. Reussner and A Koziolek and R. Heinrich},
publisher = {GI Gesellschaft f{\"u}r Informatik e.V. (GI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {621--634},
type = {Conference Paper},
month = {October},
year = {2020},
keywords = {Process Mining; Application; Production; Manufacturing; SCOR; Literature Review},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {To survive in global competition with increasing cost pressure, manufacturing
companies must continuously optimize their manufacturing-related processes.
Thereby, process mining constitutes an important data-driven approach to gain a
profound understanding of the actual processes and to identify optimization
potentials by applying data mining and machine learning techniques on event
data. However, there is little knowledge about the feasibility and usefulness
of process mining specifically in manufacturing companies. Hence, this paper
provides an overview of potential applications of process mining for the
analysis of manufacturing-related processes. We conduct a systematic literature
review, classify relevant articles according to the
Supply-Chain-Operations-Reference-Model (SCOR-model), identify research gaps,
such as domain-specific challenges regarding unstructured, cascaded and
non-linear processes or heterogeneous data sources, and give practitioners
inspiration which manufacturing-related processes can be analyzed by process
mining techniques.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-57&engl=1}
}
@inproceedings {INPROC-2020-56,
author = {Christian Weber and Peter Reimann},
title = {{MMP - A Platform to Manage Machine Learning Models in Industry 4.0 Environments}},
booktitle = {Proceedings of the IEEE 24th International Enterprise Distributed Object Computing Workshop (EDOCW)},
address = {Eindhoven, The Netherlands},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Demonstration},
month = {July},
year = {2020},
keywords = {Model Management; Machine Learning; Collaborative Data Science},
language = {English},
cr-category = {H.3.4 Information Storage and Retrieval Systems and Software},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In manufacturing environments, machine learning models are being built for
several use cases, such as predictive maintenance and product quality control.
In this context, the various manufacturing processes, machines, and product
variants make it necessary to create and use lots of different machine learning
models. This calls for a software system that is able to manage all these
diverse machine learning models and associated metadata. However, current model
management systems do not associate models with business and domain context to
provide non-expert users with tailored functions for model search and
discovery. Moreover, none of the existing systems provides a comprehensive
overview of all models within an organization. In our demonstration, we present
the MMP, our model management platform that addresses these issues. The MMP
provides a model metadata extractor, a model registry, and a context manager to
store model metadata in a central metadata store. On top of this, the MMP
provides frontend components that offer the above-mentioned functionalities. In
our demonstration, we show two scenarios for model management in Industry 4.0
environments that illustrate the novel functionalities of the MMP. We
demonstrate to the audience how the platform and its metadata, linking models
to their business and domain context, help non-expert users to search and
discover models. Furthermore, we show how to use MMP's powerful visualizations
for model reporting, such as a dashboard and a model landscape view.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-56&engl=1}
}
@inproceedings {INPROC-2020-55,
author = {Corinna Giebler and Christoph Gr{\"o}ger and Eva Hoos and Holger Schwarz and Bernhard Mitschang},
title = {{A Zone Reference Model for Enterprise-Grade Data Lake Management}},
booktitle = {Proceedings of the 24th IEEE Enterprise Computing Conference},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {57--66},
type = {Conference Paper},
month = {October},
year = {2020},
keywords = {Data Lake; Zones; Reference Model; Industry Case; Industry Experience},
language = {English},
cr-category = {H.4 Information Systems Applications},
contact = {Senden Sie eine E-Mail an corinna.giebler@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Data lakes are on the rise as data platforms for any kind of analytics, from
data exploration to machine learning. They achieve the required flexibility by
storing heterogeneous data in their raw format, and by avoiding the need for
pre-defined use cases. However, storing only raw data is inefficient, as for
many applications, the same data processing has to be applied repeatedly. To
foster the reuse of processing steps, literature proposes to store data in
different degrees of processing in addition to their raw format. To this end,
data lakes are typically structured in zones. There exists various zone models,
but they are varied, vague, and no assessments are given. It is unclear which
of these zone models is applicable in a practical data lake implementation in
enterprises. In this work, we assess existing zone models using requirements
derived from multiple representative data analytics use cases of a real-world
industry case. We identify the shortcomings of existing work and develop a zone
reference model for enterprise-grade data lake management in a detailed manner.
We assess the reference model's applicability through a prototypical
implementation for a real-world enterprise data lake use case. This assessment
shows that the zone reference model meets the requirements relevant in practice
and is ready for industry use.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-55&engl=1}
}
@inproceedings {INPROC-2020-54,
author = {Manuel Fritz and Dennis Tschechlov and Holger Schwarz},
title = {{Learning from Past Observations: Meta-Learning for Efficient Clustering Analyses}},
booktitle = {Proceedings of 22nd Big Data Analytics and Knowledge Discovery (DaWaK), 2020},
editor = {Min Song and Il-Yeol Song and Gabriele Kotsis and A Min Tjoa and Ismail Khalil},
publisher = {Springer, Cham},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Computer Science},
volume = {12393},
pages = {364--379},
type = {Conference Paper},
month = {September},
year = {2020},
isbn = {978-3-030-59065-9},
doi = {10.1007/978-3-030-59065-9_28},
language = {English},
cr-category = {H.3.3 Information Search and Retrieval},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Many clustering algorithms require the number of clusters as input parameter
prior to execution. Since the ``best'' number of clusters is most often
unknown in advance, analysts typically execute clustering algorithms multiple
times with varying parameters and subsequently choose the most promising
result. Several methods for an automated estimation of suitable parameters have
been proposed. Similar to the procedure of an analyst, these estimation methods
draw on repetitive executions of a clustering algorithm with varying
parameters. However, when working with voluminous datasets, each single
execution tends to be very time-consuming. Especially in today's Big Data
era, such a repetitive execution of a clustering algorithm is not feasible for
an efficient exploration. We propose a novel and efficient approach to
accelerate estimations for the number of clusters in datasets. Our approach
relies on the idea of meta-learning and terminates each execution of the
clustering algorithm as soon as an expected qualitative demand is met. We show
that this new approach is generally applicable, i.e., it can be used with
existing estimation methods. Our comprehensive evaluation reveals that our
approach is able to speed up the estimation of the number of clusters by an
order of magnitude, while still achieving accurate estimates.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-54&engl=1}
}
@inproceedings {INPROC-2020-53,
author = {Manuel Fritz and Michael Behringer and Holger Schwarz},
title = {{LOG-Means: Efficiently Estimating the Number of Clusters in Large Datasets}},
booktitle = {Proceedings of the 46th International Conference on Very Large Databases (VLDB)},
editor = {Magdalena Balazinska and Xiaofang Zhou},
publisher = {ACM Digital Library},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Proceedings of the VLDB Endowment},
volume = {13 (12)},
pages = {2118--2131},
type = {Conference Paper},
month = {August},
year = {2020},
issn = {2150-8097},
doi = {10.14778/3407790.3407813},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Clustering is a fundamental primitive in manifold applications. In order to
achieve valuable results, parameters of the clustering algorithm, e.g., the
number of clusters, have to be set appropriately, which is a tremendous
pitfall. To this end, analysts rely on their domain knowledge in order to
define parameter search spaces. While experienced analysts may be able to
define a small search space, especially novice analysts often define rather
large search spaces due to the lack of in-depth domain knowledge. These search
spaces can be explored in different ways by estimation methods for the number
of clusters. In the worst case, estimation methods perform an exhaustive search
in the given search space, which leads to infeasible runtimes for large
datasets and large search spaces. We propose LOG-Means, which is able to
overcome these issues of existing methods. We show that LOG-Means provides
estimates in sublinear time regarding the defined search space, thus being a
strong fit for large datasets and large search spaces. In our comprehensive
evaluation on an Apache Spark cluster, we compare LOG-Means to 13 existing
estimation methods. The evaluation shows that LOG-Means significantly
outperforms these methods in terms of runtime and accuracy. To the best of our
knowledge, this is the most systematic comparison on large datasets and search
spaces as of today.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-53&engl=1}
}
@inproceedings {INPROC-2020-52,
author = {Daniel Del Gaudio and Maximilian Reichel and Pascal Hirmer},
title = {{A Life Cycle Method for Device Management in Dynamic IoT Environments}},
booktitle = {Proceedings of the 5th International Conference on Internet of Things, Big Data and Security - Volume 1: IoTBDS},
publisher = {SciTePress Digital Library},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {46--56},
type = {Conference Paper},
month = {May},
year = {2020},
keywords = {Internet of Things, Discovery, Device Integration, Decentralization},
language = {English},
cr-category = {C.2.4 Distributed Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In the Internet of Things, interconnected devices communicate with each other
through standardized internet protocols to reach common goals. By doing so,
they enable building complex, self-organizing applications, such as Smart
Cities, or Smart Factories. Especially in large IoT environments, newly
appearing devices as well as leaving or failing IoT devices are a great
challenge. New devices need to be integrated into the application whereas
failing devices need to be dealt with. In a Smart City, newly appearing actors,
for example, smart phones or connected cars, appear and disappear all the time.
Dealing with this dynamic is a great issue, especially when done automatically.
Consequently, in this paper, we introduce A Life Cycle Method for Device
Management in Dynamic IoT Environments. This method enables integrating newly
appearing IoT devices into IoT applications and, furthermore, offers means to
cope with failing devices. Our approach is evaluated through a system
architecture and a corresponding prototypical implementation.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-52&engl=1}
}
@inproceedings {INPROC-2020-51,
author = {Daniel Del Gaudio and Pascal Hirmer},
title = {{Fulfilling the IoT Vision: Are We There Yet?}},
booktitle = {In Proceedings of the 5th International Conference on Internet of Things, Big Data and Security - Volume 1: IoTBDS},
publisher = {SciTePress Digital Library},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {367--374},
type = {Conference Paper},
month = {May},
year = {2020},
isbn = {978-989-758-426-8},
keywords = {Internet of Things, Decentralized, Autonomous, Dynamic, Smart},
language = {English},
cr-category = {C.2 Computer-Communication Networks},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The vision of the Internet of Things is enabling self-controlled and
decentralized environments, in which hardware devices, equipped with sensors
and actuators communicate with each other through standardized internet
protocols to reach common goals. The device-to-device communication should be
decentralized and should not necessarily require human interaction. However,
enabling such complex IoT applications, e.g., connected cars, is a big
challenge, since many requirements need to be fulfilled. These requirements
include, for example, security, privacy, timely data processing, uniform
communication standards, or location-awareness. Based on an intensive
literature review, in this overview paper, we define requirements for such
environments and, in addition, we discuss whether they are fulfilled by
state-of-the-art approaches or whether there still has to be work done in the
future. We conclude this paper by illustrating research gaps that have to be
filled in order to realize the IoT vision.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-51&engl=1}
}
@inproceedings {INPROC-2020-50,
author = {Rebecca Eichler and Corinna Giebler and Christoph Gr{\"o}ger and Holger Schwarz and Bernhard Mitschang},
title = {{HANDLE - A Generic Metadata Model for Data Lakes}},
booktitle = {Big Data Analytics and Knowledge Discovery: 22nd International Conference, DaWaK 2020, Bratislava, Slovakia, September 14--17, 2020, Proceedings},
publisher = {Springer, Cham},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {73--88},
type = {Conference Paper},
month = {September},
year = {2020},
language = {English},
cr-category = {H.2 Database Management},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The substantial increase in generated data induced the development of new
concepts such as the data lake. A data lake is a large storage repository
designed to enable flexible extraction of the data's value. A key aspect of
exploiting data value in data lakes is the collection and management of
metadata. To store and handle the metadata, a generic metadata model is
required that can reflect metadata of any potential metadata management use
case, e.g., data versioning or data lineage. However, an evaluation of existent
metadata models yields that none so far are sufficiently generic. In this work,
we present HANDLE, a generic metadata model for data lakes, which supports the
flexible integration of metadata, data lake zones, metadata on various granular
levels, and any metadata categorization. With these capabilities HANDLE enables
comprehensive metadata management in data lakes. We show HANDLE's feasibility
through the application to an exemplary access-use-case and a prototypical
implementation. A comparison with existent models yields that HANDLE can
reflect the same information and provides additional capabilities needed for
metadata management in data lakes.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-50&engl=1}
}
@inproceedings {INPROC-2020-48,
author = {Dennis Przytarski and Christoph Stach and Cl{\'e}mentine Gritti and Bernhard Mitschang},
title = {{A Blueprint for a Trustworthy Health Data Platform Encompassing IoT and Blockchain Technologies}},
booktitle = {Proceedings of the ISCA 29th International Conference on Software Engineering and Data Engineering (Las Vegas, October 2020)},
publisher = {ISCA in Cooperation with IEEE Computer Society},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {1--10},
type = {Conference Paper},
month = {October},
year = {2020},
language = {English},
cr-category = {H.2.7 Database Administration,
K.6.5 Security and Protection},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {eHealth provides great relief for patients and physicians. This means, patients
autonomously monitor their condition via IoT medical devices and make these
data available to physicians for analyses. This requires a data platform that
takes care of data acquisition, management, and provisioning. As health data
are highly sensitive, there are major concerns regarding data security with
respect to confidentiality, integrity, and authenticity. To this end, we
present a blueprint for constructing a trustworthy health data platform called
SEAL. It provides a lightweight attribute-based authentication mechanism for
IoT devices to validate all involved data sources, there is a fine-grained data
provisioning system to enable data provision according to actual requirements,
and a verification procedure ensures that data cannot be manipulated.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-48&engl=1}
}
@inproceedings {INPROC-2020-45,
author = {Rebecca Eichler and Corinna Giebler and Christoph Gr{\"o}ger and Holger Schwarz and Bernhard Mitschang},
title = {{HANDLE - A Generic Metadata Model for Data Lakes}},
booktitle = {Big Data Analytics and Knowledge Discovery},
editor = {Min Song and Il-Yeol Song and Gabriele Kotsis and A Min Tjoa and Ismail Khalil},
publisher = {Springer Nature Switzerland AG},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Computer Science},
volume = {12393},
pages = {73--88},
type = {Conference Paper},
month = {September},
year = {2020},
doi = {10.1007/978-3-030-59065-9_7},
keywords = {Metadata management; Metadata model; Data lake},
language = {English},
cr-category = {H.2 Database Management},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2020-45/INPROC-2020-45.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The substantial increase in generated data induced the development of new
concepts such as the data lake. A data lake is a large storage repository
designed to enable flexible extraction of the data's value. A key aspect of
exploiting data value in data lakes is the collection and management of
metadata. To store and handle the metadata, a generic metadata model is
required that can reflect metadata of any potential metadata management use
case, e.g., data versioning or data lineage. However, an evaluation of existent
metadata models yields that none so far are sufficiently generic. In this work,
we present HANDLE, a generic metadata model for data lakes, which supports the
flexible integration of metadata, data lake zones, metadata on various granular
levels, and any metadata categorization. With these capabilities HANDLE enables
comprehensive metadata management in data lakes. We show HANDLE's feasibility
through the application to an exemplary access-use-case and a prototypical
implementation. A comparison with existent models yields that HANDLE can
reflect the same information and provides additional capabilities needed for
metadata management in data lakes.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-45&engl=1}
}
@inproceedings {INPROC-2020-39,
author = {Ana Cristina Franco da Silva and Pascal Hirmer and Jan Schneider and Seda Ulusal and Matheus Tavares Frigo},
title = {{MBP: Not just an IoT Platform}},
booktitle = {2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)},
publisher = {Institute of Electrical and Electronics Engineers, Inc. (IEEE)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--3},
type = {Demonstration},
month = {August},
year = {2020},
isbn = {978-1-7281-4716-1},
doi = {10.1109/PerComWorkshops48775.2020.9156156},
keywords = {Internet of Things; Sensor Integration; IoT environments; Complex Event Processing},
language = {English},
cr-category = {H.0 Information Systems General},
ee = {https://ieeexplore.ieee.org/document/9156156},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In this demonstration paper, we introduce the Multi-purpose Binding and
Provisioning Platform (MBP), an open-source IoT platform developed for easy
binding, provisioning, and management of IoT environments. Furthermore, the MBP
enables the simple realization of IoT applications, such as heating,
ventilation, air conditioning (HVAC) systems, by allowing users to create rules
for the IoT environment, in a straightforward and event-condition-action
fashion. The efficient and timely data processing of IoT environments are
assured through underlying complex event processing technologies.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-39&engl=1}
}
@inproceedings {INPROC-2020-38,
author = {Alejandro Villanueva Zacarias and Rachaa Ghabri and Peter Reimann},
title = {{AD4ML: Axiomatic Design to Specify Machine Learning Solutions for Manufacturing}},
booktitle = {Proceedings of the 21st International Conference on Information Reuse and Integration for Data Science},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {August},
year = {2020},
language = {English},
cr-category = {H.2.8 Database Applications},
keywords = {manufacturing; machine-learning; design},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Machine learning is increasingly adopted in manufacturing use cases, e.g., for
fault detection in a production line. Each new use case requires developing its
own machine learning (ML) solution. A ML solution integrates different software
components to read, process, and analyze all use case data, as well as to
finally generate the output that domain experts need for their decision-making.
The process to design a system specification for a ML solution is not
straight-forward. It entails two types of complexity: (1) The technical
complexity of selecting combinations of ML algorithms and software components
that suit a use case; (2) the organizational complexity of integrating
different requirements from a multidisciplinary team of, e.g., domain experts,
data scientists, and IT specialists. In this paper, we propose several
adaptations to Axiomatic Design in order to design ML solution specifications
that handle these complexities. We call this Axiomatic Design for Machine
Learning (AD4ML). We apply AD4ML to specify a ML solution for a fault detection
use case and discuss to what extent our approach conquers the above-mentioned
complexities. We also discuss how AD4ML facilitates the agile design of ML
solutions.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-38&engl=1}
}
@inproceedings {INPROC-2020-37,
author = {Mathias Mormul and Pascal Hirmer and Christoph Stach and Bernhard Mitschang},
title = {{DEAR: Distributed Evaluation of Alerting Rules}},
booktitle = {IEEE 13th International Conference on Cloud Computing (CLOUD)},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--1},
type = {Conference Paper},
month = {December},
year = {2020},
keywords = {cloud monitoring; agent-based; alerting},
language = {English},
cr-category = {H.0 Information Systems General},
contact = {mathias.mormul@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Cloud computing passed the hype cycle long ago and firmly established itself as
a future technology since then. However, to utilize the cloud as
cost-efficiently as possible, a continuous monitoring is key to prevent an
over- or undercommissioning of resources. In large-scaled scenarios, several
challenges for cloud monitoring, such as high network traffic volume, low
accuracy of monitoring data, and high time-to-insight, require new approaches in
IT Operations while considering administrative complexity. To handle these
challenges, we present DEAR, the Distributed Evaluation of Alerting Rules. DEAR
is a plugin for monitoring systems which automatically distributes alerting
rules to the monitored resources to solve the trade-off between high accuracy
and low network traffic volume without administrative overhead. We evaluate our
approach against requirements of today's IT monitoring and compare it to
conventional agent-based monitoring approaches.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-37&engl=1}
}
@inproceedings {INPROC-2020-32,
author = {Vitali Hirsch and Peter Reimann and Bernhard Mitschang},
title = {{Exploiting Domain Knowledge to Address Multi-Class Imbalance and a Heterogeneous Feature Space in Classification Tasks for Manufacturing Data}},
booktitle = {Proceedings of the 46th International Conference on Very Large Databases (VLDB)},
editor = {Magdalena Balazinska and Xiaofang Zhou},
publisher = {ACM Digital Library},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Proceedings of the VLDB Endowment},
volume = {13(12)},
type = {Conference Paper},
month = {August},
year = {2020},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Classification techniques are increasingly adopted for quality control in
manufacturing, e. g., to help domain experts identify the cause of quality
issues of defective products. However, real-world data often imply a set of
analytical challenges, which lead to a reduced classification performance.
Major challenges are a high degree of multi-class imbalance within data and a
heterogeneous feature space that arises from the variety of underlying
products. This paper considers such a challenging use case in the area of
End-of-Line testing, i. e., the final functional test of complex products.
Existing solutions to classification or data pre-processing only address
individual analytical challenges in isolation. We propose a novel
classification system that explicitly addresses both challenges of multi-class
imbalance and a heterogeneous feature space together. As main contribution,
this system exploits domain knowledge to systematically prepare the training
data. Based on an experimental evaluation on real-world data, we show that our
classification system outperforms any other classification technique in terms
of accuracy. Furthermore, we can reduce the amount of rework required to solve
a quality issue of a product.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-32&engl=1}
}
@inproceedings {INPROC-2020-31,
author = {Yannick Wilhelm and Peter Reimann and Wolfgang Gauchel and Bernhard Mitschang},
title = {{Overview on Hybrid Approaches to Fault Detection and Diagnosis: Combining Data-driven, Physics-based and Knowledge-based Models}},
booktitle = {Procedia CIRP: Proceedings of the 14th CIRP Conference on Intelligent Computation in Manufacturing Engineering (CIRP ICME)},
publisher = {Elsevier BV},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {July},
year = {2020},
keywords = {Fault Detection; Fault Diagnosis; Hybrid Methods; Diagnostics and Maintenance; Knowledge-driven Methods; Machine Learning},
language = {English},
cr-category = {H.2.8 Database Applications,
I.2.1 Applications and Expert Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In this paper, we review hybrid approaches for fault detection and fault
diagnosis (FDD) that combine data-driven analysis with physics-based and
knowledge-based models to overcome a lack of data and to increase the FDD
accuracy. We categorize these hybrid approaches according to the steps of an
extended common workflow for FDD. This gives practitioners indications of which
kind of hybrid FDD approach they can use in their application.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-31&engl=1}
}
@inproceedings {INPROC-2020-20,
author = {Yannick Wilhelm and Ulf Schreier and Peter Reimann and Bernhard Mitschang and Holger Ziekow},
title = {{Data Science Approaches to Quality Control in Manufacturing: A Review of Problems, Challenges and Architecture}},
booktitle = {Springer Proceedings Series Communications in Computer and Information Science (CCIS)},
publisher = {Springer},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {October},
year = {2020},
keywords = {Data Science; Machine Learning; Quality Control; Challenges; Functional Architecture},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Manufacturing environments are characterized by non-stationary processes,
constantly varying conditions, complex process interdependencies, and a high
number of product variants. These and other aspects pose several challenges for
common machine learning algorithms to achieve reliable and accurate
predictions. This overview and vision paper provides a comprehensive list of
common problems and challenges for data science approaches to quality control
in manufacturing. We have derived these problems and challenges by inspecting
three real-world use cases in the field of product quality control and via a
comprehensive literature study. We furthermore associate the identified problems
and challenges to individual layers and components of a functional setup, as it
can be found in manufacturing environments today. Additionally, we extend and
revise this functional setup and this way propose our vision of a future
functional software architecture. This functional architecture represents a
visionary blueprint for solutions that are able to address all challenges for
data science approaches in manufacturing quality control.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-20&engl=1}
}
@inproceedings {INPROC-2020-19,
author = {Christian Weber and Pascal Hirmer and Peter Reimann},
title = {{A Model Management Platform for Industry 4.0 - Enabling Management of Machine Learning Models in Manufacturing Environments}},
booktitle = {Proceedings of the 23rd International Conference on Business Information Systems (BIS)},
editor = {Witold Abramowicz and Rainer Alt and Gary Klein and Adrian Paschke and Kurt Sandkuhl},
publisher = {Springer International Publishing},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Business Information Processing},
type = {Conference Paper},
month = {November},
year = {2020},
issn = {1865-1348},
keywords = {Model Management; Machine Learning; Metadata Tracking},
language = {English},
cr-category = {H.3.4 Information Storage and Retrieval Systems and Software},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Industry 4.0 use cases such as predictive maintenance and product quality
control make it necessary to create, use and maintain a multitude of different
machine learning models. In this setting, model management systems help to
organize models. However, concepts for model management systems currently focus
on data scientists, but do not support non-expert users such as domain experts
and business analysts. Thus, it is difficult for them to reuse existing models
for their use cases. In this paper, we address these challenges and present an
architecture, a metadata schema and a corresponding model management platform.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-19&engl=1}
}
@inproceedings {INPROC-2020-18,
author = {Julian Ziegler and Peter Reimann and Florian Keller and Bernhard Mitschang},
title = {{A Graph-based Approach to Manage CAE Data in a Data Lake}},
booktitle = {Procedia CIRP: Proceedings of the 53rd CIRP Conference on Manufacturing Systems (CIRP CMS 2020)},
publisher = {Elsevier},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {July},
year = {2020},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Computer-aided engineering (CAE) applications generate vast quantities of
heterogeneous data. Domain experts often fail to explore and analyze these
data, because they are not integrated across different applications. Existing
data management solutions are rather tailored to scientific applications. In
our approach, we tackle this issue by combining a data lake solution with
graph-based metadata management. This provides a holistic view of all CAE data
and of the data-generating applications in one interconnected structure. Based
on a prototypical implementation, we discuss how this eases the task of domain
experts to explore and extract data for further analyses.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-18&engl=1}
}
@inproceedings {INPROC-2020-17,
author = {Vitali Hirsch and Peter Reimann and Bernhard Mitschang},
title = {{Incorporating Economic Aspects into Recommendation Ranking to Reduce Failure Costs}},
booktitle = {Procedia CIRP: Proceedings of the 53rd CIRP Conference on Manufacturing Systems (CIRP CMS 2020)},
publisher = {Elsevier},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {July},
year = {2020},
keywords = {decision support; predictive analytics; quality control; End-of-Line testing; classification; fault isolation; failure costs},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Machine learning approaches for manufacturing usually offer recommendation
lists, e.g., to support humans in fault diagnosis. For instance, if a product
does not pass the final check after the assembly, a recommendation list may
contain likely faulty product components to be replaced. Thereby, the list
ranks these components using their probabilities. However, these probabilities
often differ marginally, while economic impacts, e.g., the costs for replacing
components, differ significantly. We address this issue by proposing an approach
that incorporates costs to re-rank a list. Our evaluation shows that this
approach reduces fault-related costs when using recommendation lists to support
human labor.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-17&engl=1}
}
@inproceedings {INPROC-2020-11,
author = {Mathias Mormul and Pascal Hirmer and Christoph Stach and Bernhard Mitschang},
title = {{Avoiding Vendor-Lockin in Cloud Monitoring using Generic Agent Templates}},
booktitle = {Proceedings of the 23rd International Conference on Business Information Systems (BIS), 2020},
publisher = {Springer},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--1},
type = {Conference Paper},
month = {June},
year = {2020},
keywords = {Vendor Lock-in; Cloud monitoring; Monitoring agents; Genericity},
language = {English},
cr-category = {H.4.0 Information Systems Applications General},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Cloud computing passed the hype cycle long ago and firmly established itself as
a future technology since then. However, to utilize the cloud optimally, and
therefore, as cost-efficiently as possible, a continuous monitoring is key to
prevent an over- or under-commissioning of resources. However, selecting a
suitable monitoring solution is a challenging task. Monitoring agents that
collect monitoring data are spread across the monitored IT environment.
Therefore, the possibility of vendor lock-ins leads to a lack of flexibility
when the cloud environment or the business needs change. To handle these
challenges, we introduce \textit{generic agent templates} that are applicable
to many monitoring systems and support a replacement of monitoring systems.
Solution-specific technical details of monitoring agents are abstracted from
and system administrators only need to model generic agents, which can be
transformed into solution-specific monitoring agents. The transformation logic
required for this process is provided by domain experts to not further burden
system administrators. Furthermore, we introduce an agent lifecycle to support
the system administrator with the management and deployment of generic agents.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-11&engl=1}
}
@inproceedings {INPROC-2020-08,
author = {Mathias Mormul and Christoph Stach},
title = {{A Context Model for Holistic Monitoring and Management of Complex IT Environments}},
booktitle = {Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (CoMoRea)},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--1},
type = {Workshop Paper},
month = {March},
year = {2020},
keywords = {Context Model; IT Operations; AIOps; Monitoring},
language = {English},
cr-category = {C.0 Computer Systems Organization, General},
contact = {Senden Sie eine E-Mail an mathias.mormul@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The increased usage of IoT, containerization, and multiple clouds not only
changed the way IT works but also the way IT Operations, i. e., the monitoring
and management of IT assets, works. Monitoring a complex IT environment leads
to massive amounts of heterogeneous context data, usually spread across
multiple data silos, which needs to be analyzed and acted upon autonomously.
However, for a holistic overview of the IT environment, context data needs to
be consolidated which leads to several problems. For scalable and automated
processes, it is essential to know what context is required for a given
monitored resource, where the context data are originating from, and how to
access them across the data silos. Therefore, we introduce the Monitoring
Resource Model for the holistic management of context data. We show what
context is essential for the management of monitored resources and how it can
be used for context reasoning. Furthermore, we propose a multi-layered
framework for IT Operations with which we present the benefits of the
Monitoring Resource Model.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-08&engl=1}
}
@inproceedings {INPROC-2020-07,
author = {Christoph Stach and Cl{\'e}mentine Gritti and Dennis Przytarski and Bernhard Mitschang},
title = {{Trustworthy, Secure, and Privacy-aware Food Monitoring Enabled by Blockchains and the IoT}},
booktitle = {Proceedings of the 18th Annual IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops), 23-27 March, 2020, Austin, Texas, USA},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--4},
type = {Conference Paper},
month = {March},
year = {2020},
keywords = {Attribute-based Credentials; Blockchain; Data Authentication; IoT; Privacy; Service Utility; Transparency; Trust},
language = {English},
cr-category = {K.6.5 Security and Protection,
D.4.6 Operating Systems Security and Protection},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {A large number of food scandals (e.g., falsely declared meat or non-compliance
with hygiene regulations) are causing considerable concern to consumers.
Although Internet of Things (IoT) technologies are used in the food industry to
monitor production (e.g., for tracing the origin of meat or monitoring cold
chains), the gathered data are not used to provide full transparency to the
consumer. To achieve this, however, three aspects must be considered: a) The
origin of the data must be verifiable, i.e., it must be ensured that the data
originate from calibrated sensors. b) The data must be stored tamper-resistant,
immutable, and open to all consumers. c) Despite this openness, the privacy of
affected data subjects (e.g., the carriers) must still be protected. To this
end, we introduce the SHEEPDOG architecture that ``shepherds'' products from
production to purchase to enable a trustworthy, secure, and privacy-aware food
monitoring. In SHEEPDOG, attribute-based credentials ensure trustworthy data
acquisition, blockchain technologies provide secure data storage, and
fine-grained access control enables privacy-aware data provision.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-07&engl=1}
}
@inproceedings {INPROC-2020-06,
author = {Cornelia Kiefer and Peter Reimann and Bernhard Mitschang},
title = {{Prevent Low-Quality Analytics by Automatic Selection of the Best-Fitting Training Data}},
booktitle = {Proceedings of the 53rd Hawaii International Conference on System Sciences (HICSS)},
address = {Maui, Hawaii, USA},
publisher = {Online},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1036--1045},
type = {Conference Paper},
month = {January},
year = {2020},
isbn = {978-0-9981331-3-3},
keywords = {data quality; domain-specific data analysis; text analysis; text similarity; training data},
language = {English},
cr-category = {I.2.7 Natural Language Processing},
ee = {https://scholarspace.manoa.hawaii.edu/bitstream/10125/63868/0103.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Data analysis pipelines consist of a sequence of various analysis tools. Most
of these tools are based on supervised machine learning techniques and thus
rely on labeled training data. Selecting appropriate training data has a
crucial impact on analytics quality. Yet, most of the times, domain experts who
construct analysis pipelines neglect the task of selecting appropriate training
data. They rely on default training data sets, e.g., since they do not know
which other training data sets exist and what they are used for. Yet, default
training data sets may be very different from the domain-specific input data
that is to be analyzed, leading to low-quality results. Moreover, these input
data sets are usually unlabeled. Thus, information on analytics quality is not
measurable with evaluation metrics. Our contribution comprises a method that
(1) indicates the expected quality to the domain expert while constructing the
analysis pipeline, without need for labels and (2) automatically selects the
best-fitting training data. It is based on a measurement of the similarity
between input and training data. In our evaluation, we consider the
part-of-speech tagger tool and show that Latent Semantic Analysis (LSA) and
Cosine Similarity are suited as indicators for the quality of analysis results
and as basis for an automatic selection of the best-fitting training data.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-06&engl=1}
}
@inproceedings {INPROC-2020-04,
author = {Christoph Stach and Cl{\'e}mentine Gritti and Bernhard Mitschang},
title = {{Bringing Privacy Control back to Citizens: DISPEL - A Distributed Privacy Management Platform for the Internet of Things}},
booktitle = {Proceedings of the 35th ACM/SIGAPP Symposium On Applied Computing (SAC).},
address = {Brno, Czech Republic},
publisher = {ACM Press},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--8},
type = {Conference Paper},
month = {March},
year = {2020},
keywords = {privacy; IoT; authorization concept; attribute-based access control},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
D.4.6 Operating Systems Security and Protection},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The Internet of Things (IoT) is becoming increasingly popular. It enables a
variety of novel applications. Such applications require a lot of data about
their users. To this end, sensors continuously monitor various aspects of daily
life. Despite the indisputable benefits of IoT applications, this is a severe
privacy threat. Due to the GDPR coming into force, there is a need for action
on the part of IoT vendors. In this paper, we therefore introduce a Privacy by
Design approach for IoT applications called DISPEL. It provides a configuration
method enabling users to specify globally, which application may access what
data for which purpose. Privacy protection is then applied at the earliest
stage possible, i.e., directly on the IoT devices generating the data. Data
transmission is protected against unauthorized access and manipulation.
Evaluation results show that DISPEL fulfills the requirements towards an IoT
privacy system.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-04&engl=1}
}
@inproceedings {INPROC-2020-03,
author = {Christoph Stach and Corinna Giebler and Manuela Wagner and Christian Weber and Bernhard Mitschang},
title = {{AMNESIA: A Technical Solution towards GDPR-compliant Machine Learning}},
booktitle = {Proceedings of the 6th International Conference on Information Systems Security and Privacy (ICISSP 2020)},
editor = {Steven Furnell and Paolo Mori and Edgar Weippl and Olivier Camp},
address = {Valletta, Malta},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--12},
type = {Conference Paper},
month = {February},
year = {2020},
keywords = {Machine Learning; Data Protection; Privacy Zones; Access Control; Model Management; Provenance; GDPR},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
I.5.1 Pattern Recognition Models},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Machine Learning (ML) applications are becoming increasingly valuable due to
the rise of IoT technologies. That is, sensors continuously gather data from
different domains and make them available to ML for learning its models. This
provides profound insights into the data and enables predictions about future
trends. While ML has many advantages, it also represents an immense privacy
risk. Data protection regulations such as the GDPR address such privacy
concerns, but practical solutions for the technical enforcement of these laws
are also required. Therefore, we introduce AMNESIA, a privacy-aware machine
learning model provisioning platform. AMNESIA is a holistic approach covering
all stages from data acquisition to model provisioning. This enables to control
which application may use which data for ML as well as to make models ``forget''
certain knowledge.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-03&engl=1}
}
@inproceedings {INPROC-2019-32,
author = {Vitali Hirsch and Peter Reimann and Bernhard Mitschang},
title = {{Data-Driven Fault Diagnosis in End-of-Line Testing of Complex Products}},
booktitle = {Proceedings of the 6th IEEE International Conference on Data Science and Advanced Analytics (DSAA 2019), Washington, D.C., USA},
publisher = {IEEE Xplore},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {October},
year = {2019},
keywords = {decision support; classification; ensembles; automotive; fault diagnosis; quality management; sampling},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Machine learning approaches may support various use cases in the manufacturing
industry. However, these approaches often do not address the inherent
characteristics of the real manufacturing data at hand. In fact, real data
impose analytical challenges that have a strong influence on the performance
and suitability of machine learning methods. This paper considers such a
challenging use case in the area of End-of-Line testing, i.e., the final
functional check of complex products after the whole assembly line. Here,
classification approaches may be used to support quality engineers in
identifying faulty components of defective products. For this, we discuss
relevant data sources and their characteristics, and we derive the resulting
analytical challenges. We have identified a set of sophisticated data-driven
methods that may be suitable to our use case at first glance, e.g., methods
based on ensemble learning or sampling. The major contribution of this paper is
a thorough comparative study of these methods to identify whether they are able
to cope with the analytical challenges. This comprises the discussion of both
fundamental theoretical aspects and major results of detailed experiments we
have performed on the real data of our use case.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-32&engl=1}
}
@inproceedings {INPROC-2019-20,
author = {Manuel Fritz and Holger Schwarz},
title = {{Initializing k-Means Efficiently: Benefits for Exploratory Cluster Analysis}},
booktitle = {On the Move to Meaningful Internet Systems: OTM 2019 Conferences},
editor = {Herv{\'e} Panetto and Christophe Debruyne and Martin Hepp and Dave Lewis and Claudio Agostino Ardagna and Robert Meersman},
publisher = {Springer Nature Switzerland AG},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Computer Science (LNCS)},
volume = {11877},
pages = {146--163},
type = {Conference Paper},
month = {January},
year = {2019},
isbn = {978-3-030-33245-7},
doi = {10.1007/978-3-030-33246-4_9},
keywords = {Exploratory cluster analysis; k-Means; Initialization},
language = {English},
cr-category = {E.0 Data General,
H.2.8 Database Applications,
H.3.3 Information Search and Retrieval},
ee = {https://link.springer.com/chapter/10.1007/978-3-030-33246-4_9},
contact = {manuel.fritz@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Data analysis is a highly exploratory task, where various algorithms with
different parameters are executed until a solid result is achieved. This is
especially evident for cluster analyses, where the number of clusters must be
provided prior to the execution of the clustering algorithm. Since this number
is rarely known in advance, the algorithm is typically executed several times
with varying parameters. Hence, the duration of the exploratory analysis
heavily depends on the runtime of each execution of the clustering
algorithm. While previous work shows that the initialization of clustering
algorithms is crucial for fast and solid results, it solely focuses on a single
execution of the clustering algorithm and thereby neglects previous executions.
We propose Delta Initialization as an initialization strategy for k-Means in
such an exploratory setting. The core idea of this new algorithm is to exploit
the clustering results of previous executions in order to enhance the
initialization of subsequent executions. We show that this algorithm is well
suited for exploratory cluster analysis as considerable speedups can be
achieved while additionally achieving superior clustering results compared to
state-of-the-art initialization strategies.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-20&engl=1}
}
@inproceedings {INPROC-2019-19,
author = {Christoph Stach},
title = {{VAULT: A Privacy Approach towards High-Utility Time Series Data}},
booktitle = {Proceedings of the Thirteenth International Conference on Emerging Security Information, Systems and Technologies: SECURWARE 2019},
publisher = {IARIA},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--6},
type = {Conference Paper},
month = {October},
year = {2019},
keywords = {Privacy; Time Series; Projection; Selection; Aggregation; Interpolation; Smoothing; Information Emphasization; Noise},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
D.4.6 Operating Systems Security and Protection},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {While the Internet of Things (IoT) is a key driver for Smart Services that
greatly facilitate our everyday life, it also poses a serious threat to
privacy. Smart Services collect and analyze a vast amount of (partly private)
data and thus gain valuable insights concerning their users. To prevent this,
users have to balance service quality (i.e., reveal a lot of private data) and
privacy (i.e., waive many features). Current IoT privacy approaches do not
reflect this discrepancy properly and are often too restrictive as a
consequence. For this reason, we introduce VAULT, a new approach for the
protection of private data. VAULT is tailored to time series data as used by
the IoT. It achieves a good tradeoff between service quality and privacy. For
this purpose, VAULT applies five different privacy techniques. Our
implementation of VAULT adopts a Privacy by Design approach.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-19&engl=1}
}
@inproceedings {INPROC-2019-18,
author = {Dominik Lucke and Frank Steimle and Emir Cuk and Michael Luckert and Matthias Schneider and Daniel Schel},
title = {{Implementation of the MIALinx User Interface for Future Manufacturing Environments}},
booktitle = {Proceedings of the 52nd CIRP Conference on Manufacturing Systems (CMS), Ljubljana, Slovenia, June 12-14, 2019},
publisher = {Elsevier},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Procedia CIRP},
volume = {81},
pages = {606--611},
type = {Conference Paper},
month = {June},
year = {2019},
doi = {10.1016/j.procir.2019.03.163},
keywords = {Manufacturing; Smart Factory; Industrie 4.0; Manufacturing Service Bus; Rules; Integration; User Interface},
language = {English},
cr-category = {H.4.0 Information Systems Applications General,
I.2.1 Applications and Expert Systems},
ee = {http://www.sciencedirect.com/science/article/pii/S2212827119304688},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The flexible and easy-to-use integration of production equipment and IT systems
on the shop floor becomes more and more a success factor for manufacturing to
adapt rapidly to changing situations. The approach of the Manufacturing
Integration Assistant (MIALinx) is to simplify this challenge. The integration
steps range from integrating sensors over collecting and rule-based processing
of sensor information to the execution of required actions. This paper presents
the implementation of MIALinx to retrofit legacy machines for Industry 4.0 in a
manufacturing environment and focus on the concept and implementation of the
easy-to-use user interface as a key element.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-18&engl=1}
}
@inproceedings {INPROC-2019-16,
author = {Marco Spie{\ss} and Peter Reimann},
title = {{Angepasstes Item Set Mining zur gezielten Steuerung von Bauteilen in der Serienfertigung von Fahrzeugen}},
booktitle = {Tagungsband der 18. Konferenz Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2019)},
publisher = {Gesellschaft f{\"u}r Informatik (GI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Informatics (LNI)},
pages = {119--128},
type = {Conference Paper},
month = {March},
year = {2019},
language = {German},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Qualit{\"a}tsprobleme im Bereich Fahrzeugbau k{\"o}nnen nicht nur zum Imageverlust des
Unternehmens f{\"u}hren, sondern auch mit entsprechend hohen Kosten einhergehen.
Wird ein Bauteil als Verursacher eines Qualit{\"a}tsproblems identifiziert, muss
dessen Verbau gestoppt werden. Mit einer Datenanalyse kann herausgefunden
werden, welche Fahrzeugkonfigurationen Probleme mit diesem fehlerverursachenden
Bauteil haben. Im Rahmen der dom{\"a}nenspezifischen Problemstellung wird in diesem
Beitrag die Anwendbarkeit von Standardalgorithmen aus dem Bereich Data-Mining
untersucht. Da die Analyseergebnisse auf Standardausstattungen hinweisen, sind
diese nicht zielf{\"u}hrend. F{\"u}r dieses Businessproblem von Fahrzeugherstellern
haben wir einen Data-Mining Algorithmus entwickelt, der das Vorgehen des Item
Set Mining der Assoziationsanalyse an das dom{\"a}nenspezifische Problem anpasst.
Er unterscheidet sich zum klassischen Apriori-Algorithmus in der Beschneidung
des Ergebnisraumes sowie in der nachfolgenden Aufbereitung und Verwendungsweise
der Item Sets. Der Algorithmus ist allgemeing{\"u}ltig f{\"u}r alle Fahrzeughersteller
anwendbar. Die Ergebnisse sind anhand eines realen Anwendungsfalls evaluiert
worden, bei dem durch die Anwendung unseres Algorithmus 87\% der Feldausf{\"a}lle
verhindert werden k{\"o}nnen.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-16&engl=1}
}
@inproceedings {INPROC-2019-15,
author = {Corinna Giebler and Christoph Gr{\"o}ger and Eva Hoos and Holger Schwarz},
title = {{Modeling Data Lakes with Data Vault: Practical Experiences, Assessment, and Lessons Learned}},
booktitle = {Proceedings of the 38th Conference on Conceptual Modeling (ER 2019)},
publisher = {Springer},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--14},
type = {Conference Paper},
month = {November},
year = {2019},
keywords = {Data Lakes; Data Vault; Data Modeling; Industry Experience; Assessment; Lessons Learned},
language = {English},
cr-category = {H.2.1 Database Management Logical Design},
contact = {Senden Sie eine E-Mail an Corinna.Giebler@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Data lakes have become popular to enable organization-wide analytics on
heterogeneous data from multiple sources. Data lakes store data in their raw
format and are often characterized as schema-free. Nevertheless, it turned out
that data still need to be modeled, as neglecting data modeling may lead to
issues concerning e.g., quality and integration. In current research literature
and industry practice, Data Vault is a popular modeling technique for
structured data in data lakes. It promises a flexible, extensible data model
that preserves data in their raw format. However, hardly any research or
assessment exist on the practical usage of Data Vault for modeling data lakes.
In this paper, we assess the Data Vault model's suitability for the data lake
context, present lessons learned, and investigate success factors for the use
of Data Vault. Our discussion is based on the practical usage of Data Vault in
a large, global manufacturer's data lake and the insights gained in
real-world analytics projects.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-15&engl=1}
}
@inproceedings {INPROC-2019-14,
author = {Corinna Giebler and Christoph Gr{\"o}ger and Eva Hoos and Holger Schwarz},
title = {{Leveraging the Data Lake - Current State and Challenges}},
booktitle = {Proceedings of the 21st International Conference on Big Data Analytics and Knowledge Discovery (DaWaK'19)},
publisher = {Springer Nature},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--10},
type = {Conference Paper},
month = {August},
year = {2019},
keywords = {Data Lakes, State of the Art, Challenges},
language = {English},
cr-category = {H.2.4 Database Management Systems,
H.2.8 Database Applications},
contact = {Senden Sie eine E-Mail an Corinna.Giebler@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The digital transformation leads to massive amounts of heterogeneous data
challenging traditional data warehouse solutions in enterprises. In order to
exploit these complex data for competitive advantages, the data lake recently
emerged as a concept for more flexible and powerful data analytics. However,
existing literature on data lakes is rather vague and incomplete, and the
various realization approaches that have been proposed neither cover all
aspects of data lakes nor do they provide a comprehensive design and
realization strategy. Hence, enterprises face multiple challenges when building
data lakes. To address these shortcomings, we investigate existing data lake
literature and discuss various design and realization aspects for data lakes,
such as governance or data models. Based on these insights, we identify
challenges and research gaps concerning (1) data lake architecture, (2) data
lake governance, and (3) a comprehensive strategy to realize data lakes. These
challenges still need to be addressed to successfully leverage the data lake in
practice.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-14&engl=1}
}
@inproceedings {INPROC-2019-12,
author = {Christoph Stach},
title = {{Konzepte zum Schutz privater Muster in Zeitreihendaten: IoT-Anwendungen im Spannungsfeld zwischen Servicequalit{\"a}t und Datenschutz}},
booktitle = {Informatik 2019: 50 Jahre Gesellschaft f{\"u}r Informatik -- Informatik f{\"u}r Gesellschaft, Tagungsband der 49. Jahrestagung der Gesellschaft f{\"u}r Informatik e.V. (GI), 23.09. - 26.09.2019, Kassel},
publisher = {GI Gesellschaft f{\"u}r Informatik e.V. (GI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Informatics (LNI)},
pages = {1--14},
type = {Conference Paper},
month = {September},
year = {2019},
keywords = {Datenschutz; Zeitreihendaten; IoT; DSGVO; ePrivacy-Verordnung; TICK-Stack},
language = {German},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
G.1.10 Numerical Analysis Applications},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Obwohl das Internet der Dinge (IoT) die Voraussetzung f{\"u}r smarte Anwendungen
schafft, die signifikante Vorteile gegen{\"u}ber traditionellen Anwendungen
bieten, stellt die zunehmende Verbreitung von IoT-f{\"a}higen Ger{\"a}ten auch eine
immense Gef{\"a}hrdung der Privatheit dar. IoT-Anwendungen sammeln eine Vielzahl
an Daten und senden diese zur Verarbeitung an ein leistungsstarkes Back-End.
Hierbei werden umfangreiche Erkenntnisse {\"u}ber den Nutzer gewonnen. Erst dieses
Wissen erm{\"o}glicht die Servicevielfalt die IoT-Anwendungen bieten. Der Nutzer
muss daher einen Kompromiss aus Servicequalit{\"a}t und Datenschutz treffen.
Heutige Datenschutzans{\"a}tze ber{\"u}cksichtigen dies unzureichend und sind dadurch
h{\"a}ufig zu restriktiv. Aus diesem Grund stellen wir neue Konzepte zum Schutz
privater Daten f{\"u}r das IoT vor. Diese ber{\"u}cksichtigen die speziellen
Eigenschaften der im IoT zum Einsatz kommenden Zeitreihendaten. So kann die
Privatheit des Nutzers gew{\"a}hrleistet werden, ohne die Servicequalit{\"a}t
unn{\"o}tig einzuschr{\"a}nken. Basierend auf den TICK-Stack beschreiben wir
Implementierungsans{\"a}tze f{\"u}r unsere Konzepte, die einem
Privacy-by-Design-Ansatz folgen.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-12&engl=1}
}
@inproceedings {INPROC-2019-10,
author = {Christian Weber and Pascal Hirmer and Peter Reimann and Holger Schwarz},
title = {{A New Process Model for the Comprehensive Management of Machine Learning Models}},
booktitle = {Proceedings of the 21st International Conference on Enterprise Information Systems (ICEIS); Heraklion, Crete, Greece, May 3-5, 2019},
editor = {Joaquim Filipe and Michal Smialek and Alexander Brodsky and Slimane Hammoudi},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {415--422},
type = {Conference Paper},
month = {May},
year = {2019},
isbn = {978-989-758-372-8},
doi = {10.5220/0007725304150422},
keywords = {Model Management; Machine Learning; Analytics Process},
language = {English},
cr-category = {I.2 Artificial Intelligence},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The management of machine learning models is an extremely challenging task.
Hundreds of prototypical models are being built and just a few are mature
enough to be deployed into operational enterprise information systems. The
lifecycle of a model includes an experimental phase in which a model is
planned, built and tested. After that, the model enters the operational phase
that includes deploying, using, and retiring it. The experimental phase is well
known through established process models like CRISP-DM or KDD. However, these
models do not detail on the interaction between the experimental and the
operational phase of machine learning models. In this paper, we provide a new
process model to show the interaction points of the experimental and
operational phase of a machine learning model. For each step of our process, we
discuss according functions which are relevant to managing machine learning
models.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-10&engl=1}
}
@inproceedings {INPROC-2019-09,
author = {Christoph Stach and Bernhard Mitschang},
title = {{ECHOES: A Fail-safe, Conflict Handling, and Scalable Data Management Mechanism for the Internet of Things}},
booktitle = {Proceedings of the 23rd European Conference on Advances in Databases and Information Systems: ADBIS '19; Bled, Slovenia, September 8-11, 2019},
editor = {Tatjana Welzer and Johann Eder and Vili Podgorelec and Aida Kamisalic Latific},
publisher = {Springer},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Computer Science (LNCS)},
pages = {1--16},
type = {Conference Paper},
month = {September},
year = {2019},
keywords = {Internet of Things; Data Exchange; Synchronization Protocol},
language = {English},
cr-category = {H.2.7 Database Administration,
H.2.4 Database Management Systems},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The Internet of Things (IoT) and Smart Services are becoming increasingly
popular. Such services adapt to a user's needs by using sensors to detect the
current situation. Yet, an IoT service has to capture its required data by
itself, even if another service has already captured it before. There is no
data exchange mechanism adapted to the IoT which enables sharing of sensor data
among services and across devices.
Therefore, we introduce a data management mechanism for the IoT. It applies a
state-based synchronization protocol called ECHOES. It is fail-safe in
case of connection failures, it detects and handles data conflicts, it is
geared towards devices with limited resources, and it is highly scalable. We
embed ECHOES into a data provisioning infrastructure, namely the Privacy
Management Platform and the Secure Data Container. Evaluation results verify
the practicability of our approach.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-09&engl=1}
}
@inproceedings {INPROC-2019-08,
author = {Cornelia Kiefer and Peter Reimann and Bernhard Mitschang},
title = {{A Hybrid Information Extraction Approach Exploiting Structured Data Within a Text Mining Process}},
booktitle = {18. Fachtagung des GI-Fachbereichs ,,Datenbanken und Informationssysteme (DBIS), 4.-8. M{\"a}rz 2019, Rostock, Germany, Proceedings.},
editor = {Torsten Grust and Felix Naumann and Alexander B{\"o}hm and Wolfgang Lehner and Theo H{\"a}rder and Erhard et al. Rahm},
address = {Bonn},
publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {149--168},
type = {Conference Paper},
month = {March},
year = {2019},
keywords = {information extraction; clustering; text mining; free text fields},
language = {English},
cr-category = {I.2.7 Natural Language Processing},
ee = {https://doi.org/10.18420/btw2019-10},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Many data sets encompass structured data fields with embedded free text fields.
The text fields allow customers and workers to input information which cannot
be encoded in structured fields. Several approaches use structured and
unstructured data in isolated analyses. The result of isolated mining of
structured data fields misses crucial information encoded in free text. The
result of isolated text mining often mainly repeats information already
available from structured data. The actual information gain of isolated text
mining is thus limited. The main drawback of both isolated approaches is that
they may miss crucial information. The hybrid information extraction approach
suggested in this paper addresses this issue. Instead of extracting information
that in large parts was already available beforehand, it extracts new, valuable
information from free texts. Our solution exploits results of analyzing
structured data within the text mining process, i.e., structured information
guides and improves the information extraction process on textual data. Our
main contributions comprise the description of the concept of hybrid
information extraction as well as a prototypical implementation and an
evaluation with two real-world data sets from aftersales and production with
English and German free text fields.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-08&engl=1}
}
@inproceedings {INPROC-2019-07,
author = {Cornelia Kiefer},
title = {{Quality Indicators for Text Data}},
booktitle = {18. Fachtagung des GI-Fachbereichs ,,Datenbanken und Informationssysteme (DBIS), 4.-8. M{\"a}rz 2019, Rostock, Germany, Workshopband.},
editor = {Holger Meyer and Norbert Ritter and Andreas Thor and Daniela Nicklas and Andreas Heuer and Meike Klettke},
address = {Bonn},
publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Dagstuhl Reports},
pages = {145--154},
type = {Conference Paper},
month = {March},
year = {2019},
keywords = {data quality; text data quality; text mining; text analysis; quality indicators for text data},
language = {English},
cr-category = {I.2.7 Natural Language Processing},
ee = {https://doi.org/10.18420/btw2019-ws-15},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Textual data sets vary in terms of quality. They have different characteristics
such as the average sentence length or the amount of spelling mistakes and
abbreviations. These text characteristics have influence on the quality of text
mining results. They may be measured automatically by means of quality
indicators. We present indicators, which we implemented based on natural
language processing libraries such as Stanford CoreNLP and NLTK. We discuss
design decisions in the implementation of exemplary indicators and provide all
indicators on GitHub. In the evaluation, we investigate freetexts from
production, news, prose, tweets and chat data and show that the suggested
indicators predict the quality of two text mining modules.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-07&engl=1}
}
@inproceedings {INPROC-2019-06,
author = {Christoph Stach and Frank Steimle and Clementine Gritti and Bernhard Mitschang},
title = {{PSSST! The Privacy System for Smart Service Platforms: An Enabler for Confidable Smart Environments}},
booktitle = {Proceedings of the 4th International Conference on Internet of Things, Big Data and Security (IoTBDS '19)},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--12},
type = {Conference Paper},
month = {May},
year = {2019},
keywords = {Privacy; Access Control; Internet of Things; Smart Service Platform; Sensors; Actuators; Stream Processing},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
D.4.6 Operating Systems Security and Protection},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The Internet of Things and its applications are becoming increasingly popular.
Especially Smart Service Platforms like Alexa are in high demand. Such a
platform retrieves data from sensors, processes them in a back-end, and
controls actuators in accordance with the results. Thereby, all aspects of our
everyday life can be managed. In this paper, we reveal the downsides of this
technology by identifying its privacy threats based on a real-world
application. Our studies show that current privacy systems do not tackle these
issues adequately. Therefore, we introduce PSSST!, a user-friendly and
comprehensive privacy system for Smart Service Platforms limiting the amount of
disclosed private information while maximizing the quality of service at the
same time.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-06&engl=1}
}
@inproceedings{INPROC-2019-03,
  author = {Christoph Stach and Corinna Giebler and Simone Schmidt},
  title = {{Zuverl{\"a}ssige Versp{\"a}tungsvorhersagen mithilfe von TAROT}},
  booktitle = {Tagungsband der 18. GI-Fachtagung Datenbanksysteme f{\"u}r Business, Technologie und Web},
  publisher = {GI Gesellschaft f{\"u}r Informatik e.V. (GI)},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  series = {Lecture Notes in Informatics (LNI)},
  pages = {1--4},
  type = {Demonstration},
  month = {March},
  year = {2019},
  keywords = {Versp{\"a}tungsvorhersage; {\"O}PNV; deskriptive Analyse; pr{\"a}diktive Analyse; Concept Drift},
  language = {German},
  cr-category = {H.2.8 Database Applications,
    H.2.4 Database Management Systems},
  contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
  department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract = {Bei der Einhaltung von Schadstoffwerten nehmen {\"o}ffentliche Verkehrsmittel eine
    immer entscheidendere Rolle ein. Daher wird vermehrt darauf geachtet, deren
    Attraktivit{\"a}t zu erh{\"o}hen. Ein wichtiger Punkt hierbei ist die
    Vorhersagegenauigkeit von Versp{\"a}tungen zu verbessern, damit Fahrg{\"a}ste
    entsprechend planen k{\"o}nnen. Die aktuell angewandten Ans{\"a}tze sind h{\"a}ufig
    ungenau, da sie die zur Verf{\"u}gung stehenden Daten nicht ausreichend nutzen. In
    diesem Beitrag stellen wir daher mit TAROT ein System vor, das mittels
    pr{\"a}diktiver Analysen die Vorhersagegenauigkeit von Versp{\"a}tungen verbessert,
    indem es in den Modellen Versp{\"a}tungsfortpflanzungen ber{\"u}cksichtigt. Dar{\"u}ber
    hinaus ist es in der Lage, im Fall einer St{\"o}rung augenblicklich auf ein
    besseres Vorhersagemodell umzusteigen und auf sowohl schleichende als auch
    abrupte Ver{\"a}nderungen automatisch zu reagieren. Die Vorteile dieser
    Eigenschaften lassen sich in unserem TAROT-Demonstrator anhand von vier
    repr{\"a}sentativen Anwendungsszenarien zeigen. Auch wenn sich die gezeigten
    Szenarien alle auf die Versp{\"a}tungsvorhersage von S-Bahnen beziehen, lassen
    sich die Konzepte von TAROT auch auf viele andere Anwendungsbereiche (z.B. zur
    Bestimmung von Produktionszeiten in der Industrie 4.0) anwenden.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-03&engl=1}
}
@inproceedings {INPROC-2019-02,
author = {Christoph Stach and Frank Steimle},
title = {{Recommender-based Privacy Requirements Elicitation - EPICUREAN: An Approach to Simplify Privacy Settings in IoT Applications with Respect to the GDPR}},
booktitle = {Proceedings of the 34th ACM/SIGAPP Symposium On Applied Computing (SAC)},
publisher = {ACM Press},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--8},
type = {Conference Paper},
month = {April},
year = {2019},
keywords = {privacy requirements elicitation; recommender system; knowledge modeling; clustering; association rules; privacy system; IoT; eHealth},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
J.4 Social and Behavioral Sciences,
H.3.3 Information Search and Retrieval},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Due to the Internet of Things (IoT), a giant leap towards a quantified self is
made, i.e., more and more aspects of our lives are being captured, processed,
and analyzed. This has many positive implications, e.g., Smart Health services
help to relieve patients as well as physicians and reduce treatment costs.
However, the price for such services is the disclosure of a lot of private
data. For this reason, Smart Health services were particularly considered by
the European General Data Protection Regulation (GDPR): a data subject's
explicit consent is required when such a service processes his or her data.
However, the elicitation of privacy requirements is a shortcoming in most IoT
privacy systems. Either the user is overwhelmed by too many options or s/he is
not sufficiently involved in the decision process. For this reason, we
introduce EPICUREAN, a recommender-based privacy requirements elicitation
approach. EPICUREAN uses modeling and data mining techniques to determine and
recommend appropriate privacy settings to the user. The user is thus
considerably supported but remains in full control over his or her private
data.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-02&engl=1}
}
@inproceedings{INPROC-2018-54,
  author = {Alejandro Villanueva Zacarias and Peter Reimann and Bernhard Mitschang},
  title = {{A Framework to Guide the Selection and Configuration of Machine-Learning-based Data Analytics Solutions in Manufacturing}},
  booktitle = {Proceedings of the 51st CIRP Conference on Manufacturing Systems (CIRP CMS 2018)},
  publisher = {Elsevier BV},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages = {153--158},
  type = {Conference Paper},
  month = {May},
  year = {2018},
  keywords = {data analytics; machine learning; learning algorithms; generative design},
  language = {English},
  cr-category = {H.2.8 Database Applications},
  department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract = {Users in manufacturing willing to apply machine-learning-based (ML-based) data
    analytics face challenges related to data quality or to the selection and
    configuration of proper ML algorithms. Current approaches are either purely
    empirical or reliant on technical data. This makes understanding and comparing
    candidate solutions difficult, and also ignores the way it impacts the real
    application problem. In this paper, we propose a framework to generate
    analytics solutions based on a systematic profiling of all aspects involved.
    With it, users can visually and systematically explore relevant alternatives
    for their specific scenario, and obtain recommendations in terms of costs,
    productivity, results quality, or execution time.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-54&engl=1}
}
@inproceedings{INPROC-2018-53,
  author = {Vitali Hirsch and Peter Reimann and Oliver Kirn and Bernhard Mitschang},
  title = {{Analytical Approach to Support Fault Diagnosis and Quality Control in End-Of-Line Testing}},
  booktitle = {Proceedings of the 51st CIRP Conference on Manufacturing Systems (CIRP CMS 2018)},
  publisher = {Elsevier BV},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages = {1333--1338},
  type = {Conference Paper},
  month = {May},
  year = {2018},
  keywords = {Analytics; decision support; recommendation system; fault diagnosis; quality control},
  language = {English},
  cr-category = {H.2.8 Database Applications},
  department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract = {Operators in end-of-line testing of assembly lines often try out multiple
    solutions until they can solve a product quality issue. This calls for a
    decision support system based on data analytics that effectively helps
    operators in fault diagnosis and quality control. However, existing analytical
    approaches do not consider the specific data characteristics being prevalent in
    the area of End-of-Line (EoL) testing. We address this issue by proposing an
    analytical approach that is tailored to EoL testing. We show how to implement
    this approach in a real-world use case of a large automotive manufacturer,
    which reveals its potential to reduce unnecessary rework.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-53&engl=1}
}
@inproceedings{INPROC-2018-41,
  author = {Michael Wurster and Uwe Breitenb{\"u}cher and Oliver Kopp and Frank Leymann},
  title = {{Modeling and Automated Execution of Application Deployment Tests}},
  booktitle = {Proceedings of the IEEE 22nd International Enterprise Distributed Object Computing Conference (EDOC)},
  publisher = {IEEE Computer Society},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages = {171--180},
  type = {Conference Paper},
  month = {October},
  year = {2018},
  doi = {10.1109/EDOC.2018.00030},
  keywords = {Testing; Declarative Application Deployment; Test Automation; Model-based Testing; TOSCA},
  language = {English},
  cr-category = {D.2.5 Software Engineering Testing and Debugging,
    D.2.9 Software Engineering Management},
  contact = {Michael Wurster michael.wurster@iaas.uni-stuttgart.de},
  department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
    University of Stuttgart, Institute of Architecture of Application Systems},
  abstract = {In recent years, many deployment systems have been developed that process
    deployment models to automatically provision applications. The main objective
    of these systems is to shorten delivery times and to ensure a proper execution
    of the deployment process. However, these systems mainly focus on the correct
    technical execution of the deployment, but do not check whether the deployed
    application is working properly. Especially in DevOps scenarios where
    applications are modified frequently, this can quickly lead to broken
    deployments, for example, if a wrong component version was specified in the
    deployment model that has not been adapted to a new database schema.
    Ironically, even hardly noticeable errors in deployment models quickly result
    in technically successful deployments, which do not work at all. In this paper,
    we tackle these issues. We present a modeling concept that enables developers
    to define deployment tests directly along with the deployment model. These
    tests are then automatically run by a runtime after deployment to verify that
    the application is working properly. To validate the technical feasibility of
    the approach, we applied the concept to TOSCA and extended an existing open
    source TOSCA runtime.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-41&engl=1}
}
@inproceedings {INPROC-2018-38,
author = {Andreas Liebing and Lutz Ashauer and Uwe Breitenb{\"u}cher and Thomas G{\"u}nther and Michael Hahn and K{\'a}lm{\'a}n K{\'e}pes and Oliver Kopp and Frank Leymann and Bernhard Mitschang and Ana C. Franco da Silva and Ronald Steinke},
title = {{The SmartOrchestra Platform: A Configurable Smart Service Platform for IoT Systems}},
booktitle = {Papers from the 12th Advanced Summer School on Service-Oriented Computing (SummerSoC 2018)},
publisher = {IBM Research Division},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {14--21},
type = {Conference Paper},
month = {October},
year = {2018},
keywords = {SmartOrchestra Platform; Smart Services; Cyber-Physical Systems; Internet of Things},
language = {English},
cr-category = {K.6 Management of Computing and Information Systems,
D.2.7 Software Engineering Distribution, Maintenance, and Enhancement,
D.2.12 Software Engineering Interoperability},
ee = {https://www.2018.summersoc.eu/},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {The Internet of Things is growing rapidly while still missing a universal
operating and management platform for multiple diverse use cases. Such a
platform should provide all necessary functionalities and the underlying
infrastructure for the setup, execution and composition of Smart Services. The
concept of Smart Services enables the connection and integration of
cyber-physical systems (CPS) and technologies (i.e., sensors and actuators)
with business-related applications and services. Therefore, the SmartOrchestra
Platform provides an open and standards-based service platform for the
utilization of public administrative and business-related Smart Services. It
combines the features of an operating platform, a marketplace, a broker, and a
notary for a cloud-based operation of Smart Services. Thus, users of
cyber-physical systems are free to choose their control applications, no matter
what device they are using (e.g., smartphone, tablet or personal computer) and
they also become independent of the manufacturers' software. This will enable
new business opportunities for different stakeholders in the market and allows
flexibly composing Smart Services.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-38&engl=1}
}
@inproceedings{INPROC-2018-34,
  author = {Ana Cristina Franco da Silva and Pascal Hirmer and Rafael Koch Peres and Bernhard Mitschang},
  title = {{An Approach for CEP Query Shipping to Support Distributed IoT Environments}},
  booktitle = {Proceedings of the IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)},
  publisher = {IEEE},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages = {247--252},
  type = {Conference Paper},
  month = {October},
  year = {2018},
  isbn = {978-1-5386-3227-7},
  doi = {10.1109/PERCOMW.2018.8480241},
  language = {English},
  cr-category = {H.0 Information Systems General},
  ee = {https://ieeexplore.ieee.org/document/8480241},
  contact = {francoaa@ipvs.uni-stuttgart.de},
  department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract = {},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-34&engl=1}
}
@inproceedings{INPROC-2018-33,
  author = {Karoline Saatkamp and Uwe Breitenb{\"u}cher and Oliver Kopp and Frank Leymann},
  title = {{Application Scenarios for Automated Problem Detection in TOSCA Topologies by Formalized Patterns}},
  booktitle = {Papers From the 12th Advanced Summer School of Service-Oriented Computing (SummerSOC 2018)},
  publisher = {IBM Research Division},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages = {43--53},
  type = {Conference Paper},
  month = {October},
  year = {2018},
  keywords = {Cloud Computing Patterns; Formalization; Prolog; TOSCA},
  language = {English},
  cr-category = {C.2.4 Distributed Systems,
    D.2.7 Software Engineering Distribution, Maintenance, and Enhancement,
    K.6 Management of Computing and Information Systems},
  ee = {https://www.2018.summersoc.eu/},
  department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
    University of Stuttgart, Institute of Architecture of Application Systems},
  abstract = {},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-33&engl=1}
}
@inproceedings{INPROC-2018-28,
  author = {Mathias Mormul and Pascal Hirmer and Matthias Wieland and Bernhard Mitschang},
  title = {{Layered Modeling Approach for Distributed Situation Recognition in Smart Environments}},
  booktitle = {Tagungsband: SMART 2018, The Seventh International Conference on Smart Cities, Systems, Devices and Technologies},
  publisher = {Xpert Publishing Services},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages = {47--53},
  type = {Conference Paper},
  month = {July},
  year = {2018},
  isbn = {978-1-61208-653-8},
  keywords = {Industry 4.0; Edge Computing; Smart Factories; Smart Homes; Situation Recognition; Distribution Pattern},
  language = {English},
  cr-category = {H.0 Information Systems General},
  contact = {mathias.mormul@ipvs.uni-stuttgart.de},
  department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract = {},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-28&engl=1}
}
@inproceedings{INPROC-2018-24,
  author = {Dominik Lucke and Peter Einberger and Daniel Schel and Michael Luckert and Matthias Schneider and Emir Cuk and Thomas Bauernhansl and Matthias Wieland and Frank Steimle and Bernhard Mitschang},
  title = {{Implementation of the MIALinx Integration Concept for Future Manufacturing Environments to Enable Retrofitting of Machines}},
  booktitle = {Proceedings of the 12th CIRP Conference on Intelligent Computation in Manufacturing Engineering (CIRP ICME '18); Naples, Italy, July 18-20, 2018},
  publisher = {Elsevier},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages = {596--601},
  type = {Conference Paper},
  month = {July},
  year = {2018},
  doi = {10.1016/j.procir.2019.02.084},
  keywords = {Manufacturing; Smart Factory; Industrie 4.0; Manufacturing Service Bus; Rules; Integration; MIALinx},
  language = {English},
  cr-category = {H.4.0 Information Systems Applications General,
    I.2.1 Applications and Expert Systems},
  ee = {http://www.sciencedirect.com/science/article/pii/S221282711930201X},
  contact = {Frank.Steimle@ipvs.uni-stuttgart.de},
  department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract = {Manufacturing has to adapt permanently to changing situations in order to stay
    competitive. It demands a flexible and easy-to-use integration of production
    equipment and ICT systems on the shop floor. The contribution of this paper is
    the presentation of the implementation architecture of the Manufacturing
    Integration Assistant (MIALinx) that simplifies this challenge. The integration
    steps range from integrating sensors over collecting and rule-based processing
    of sensor information to the execution of required actions. Furthermore, we
    describe the implementation of MIALinx by commissioning it in a manufacturing
    environment to retrofit legacy machines for Industrie 4.0. Finally, we validate
    the suitability of our approach by applying our solution in the production
    environment of a medium-size company.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-24&engl=1}
}
@inproceedings {INPROC-2018-23,
author = {Dominik Brenner and Christian Weber and Juergen Lenz and Engelbert Westk{\"a}mper},
title = {{Total Tool Cost of Ownership Indicator for Holistical Evaluations of Improvement Measures within the Cutting Tool Life Cycle}},
booktitle = {51st CIRP Conference on Manufacturing Systems (CIRP CMS), Stockholm, Sweden, May 16-18, 2018},
editor = {Lihui Wang and Torsten Kjellberg and Xi Vincent Wang and Wei Ji},
publisher = {Elsevier},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Procedia CIRP},
volume = {72},
pages = {1404--1409},
type = {Conference Paper},
month = {May},
year = {2018},
doi = {10.1016/j.procir.2018.03.164},
issn = {2212-8271},
keywords = {Cutting Tool Life Cycle; Total Cost of Ownership; Manufacturing; Data Integration},
language = {English},
cr-category = {H.3.0 Information Storage and Retrieval General,
H.4.0 Information Systems Applications General,
J.1 Administration Data Processing},
ee = {http://www.sciencedirect.com/science/article/pii/S2212827118303226},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-23&engl=1}
}
@inproceedings {INPROC-2018-22,
author = {Rachaa Ghabri and Pascal Hirmer and Bernhard Mitschang},
title = {{A Hybrid Approach to Implement Data Driven Optimization into Production Environments}},
booktitle = {Proceedings of the 21st International Conference on Business Information Systems (BIS)},
editor = {Witold Abramowicz and Adrian Paschke},
publisher = {Springer Berlin Heidelberg},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Business Information Processing},
volume = {320},
pages = {3--14},
type = {Conference Paper},
month = {July},
year = {2018},
isbn = {978-3-319-93930-8},
doi = {10.1007/978-3-319-93931-5},
issn = {1865-1356},
keywords = {Data driven optimization; Production environment; Top-down; Bottom-up},
language = {English},
cr-category = {H.0 Information Systems General},
ee = {https://link.springer.com/chapter/10.1007/978-3-319-93931-5_1},
contact = {rachaa.ghabri@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-22&engl=1}
}
@inproceedings{INPROC-2018-19,
  author = {Christoph Stach and Frank Steimle and Bernhard Mitschang},
  title = {{THOR - Ein Datenschutzkonzept f{\"u}r die Industrie 4.0: Datenschutzsysteme f{\"u}r die Smart Factory zur Realisierung der DSGVO}},
  booktitle = {Informatik 2018: Zukunft der Arbeit - Zukunft der Informatik, Tagungsband der 48. Jahrestagung der Gesellschaft f{\"u}r Informatik e.V. (GI), 26.09. - 27.09.2018, Berlin.},
  publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  series = {Lecture Notes in Informatics (LNI)},
  pages = {1--12},
  type = {Workshop Paper},
  month = {September},
  year = {2018},
  keywords = {Datenschutz; Internet der Dinge; Sensoren; Industrie 4.0; Datenstr{\"o}me; Smart Devices},
  language = {German},
  cr-category = {K.4.1 Computers and Society Public Policy Issues,
    D.4.6 Operating Systems Security and Protection},
  contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
  department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract = {Der Aufschwung des Internets der Dinge (IoT) sorgt f{\"u}r eine voranschreitende
    Digitalisierung. Sensoren in Alltagsgegenst{\"a}nden erfassen unterschiedliche
    Aspekte des t{\"a}glichen Lebens. Durch eine Vernetzung dieser Ger{\"a}te, lassen
    sich die Daten miteinander kombinieren und daraus neues Wissen generieren. In
    der Industrie 4.0 werden beispielsweise die am Produktionsprozess beteiligten
    cyber-physischen Systeme dazu genutzt, um mit den von ihnen erfassten Daten
    Produktionsprozesse zu optimieren. Da auch der Mensch ein relevanter
    Bestandteil des Produktionsprozesses ist, werden z.B. mittels Smart Watches
    auch {\"u}ber diesen viele Daten erfasst. Nicht erst mit der Einf{\"u}hrung der neuen
    Datenschutzgrundverordnung (DSGVO) sind hierbei allerdings
    Datenschutzanforderungen zu beachten: Es m{\"u}ssen nicht nur die privaten Daten
    der Nutzer gesch{\"u}tzt werden, sondern es muss auch sichergestellt werden, dass
    die Datenverarbeitung und -analyse dadurch so wenig wie m{\"o}glich behindert
    werden. Wir stellen hierf{\"u}r ein neuartiges Datenschutzkonzept f{\"u}r die
    Industrie 4.0 (THOR) vor, mit dem Kompromisse zwischen erforderlichem
    Datenschutz und gew{\"u}nschter Datenqualit{\"a}t gefunden werden k{\"o}nnen, der der
    DSGVO gen{\"u}gt.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-19&engl=1}
}
@inproceedings{INPROC-2018-18,
  author = {Ana Cristina Franco da Silva and Pascal Hirmer and Uwe Breitenb{\"u}cher and Oliver Kopp and Bernhard Mitschang},
  title = {{TDLIoT: A Topic Description Language for the Internet of Things}},
  booktitle = {ICWE 2018: Web Engineering},
  editor = {Tommi Mikkonen and Ralf Klamma and Juan Hern{\'a}ndez},
  publisher = {Springer Berlin Heidelberg},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  series = {Lecture Notes in Computer Science (LNCS)},
  volume = {10845},
  pages = {333--348},
  type = {Conference Paper},
  month = {May},
  year = {2018},
  doi = {10.1007/978-3-319-91662-0_27},
  keywords = {Internet of Things; Publish-subscribe; Description Language},
  language = {English},
  cr-category = {K.6 Management of Computing and Information Systems,
    D.2.12 Software Engineering Interoperability},
  ee = {https://link.springer.com/chapter/10.1007/978-3-319-91662-0_27},
  contact = {franco-da-silva@informatik.uni-stuttgart.de},
  department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
    University of Stuttgart, Institute of Architecture of Application Systems},
  abstract = {},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-18&engl=1}
}
@inproceedings{INPROC-2018-17,
  author = {Marc H{\"u}ffmeyer and Pascal Hirmer and Bernhard Mitschang and Ulf Schreier and Matthias Wieland},
  title = {{Situation-Aware Access Control for Industrie 4.0}},
  booktitle = {ICISSP 2017: Information Systems Security and Privacy},
  publisher = {Springer},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  series = {Communications in Computer and Information Science},
  volume = {867},
  pages = {59--83},
  type = {Conference Paper},
  month = {June},
  year = {2018},
  keywords = {Authorization; Attribute based access control; Situation-awareness; REST; Internet of Things},
  language = {English},
  cr-category = {E.0 Data General},
  ee = {https://link.springer.com/chapter/10.1007/978-3-319-93354-2_4},
  contact = {Pascal.Hirmer@ipvs.uni-stuttgart.de},
  department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract = {},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-17&engl=1}
}
@inproceedings {INPROC-2018-15,
author = {Christoph Stach and Sascha Alpers and Stefanie Betz and Frank D{\"u}rr and Andreas Fritsch and Kai Mindermann and Saravana Murthy Palanisamy and Gunther Schiefer and Manuela Wagner and Bernhard Mitschang and Andreas Oberweis and Stefan Wagner},
title = {{The AVARE PATRON: A Holistic Privacy Approach for the Internet of Things}},
booktitle = {Proceedings of the 15th International Conference on Security and Cryptography (SECRYPT '18)},
publisher = {INSTICC Press},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--8},
type = {Conference Paper},
month = {July},
year = {2018},
keywords = {Privacy; IoT Apps; Smart Things; Stream Processing; Privacy Preferences Elicitation \& Verification},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
D.4.6 Operating Systems Security and Protection},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Software Technology, Software Engineering;
University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
abstract = {Applications for the Internet of Things are becoming increasingly popular. Due
to the large amount of available context data, such applications can be used
effectively in many domains. By interlinking these data and analyzing them, it
is possible to gather a lot of knowledge about a user. Therefore, these
applications pose a threat to privacy. In this paper, we illustrate this threat
by looking at a real-world application scenario. Current state of the art
focuses on privacy mechanisms either for Smart Things or for big data
processing systems. However, our studies show that for a comprehensive privacy
protection a holistic view on these applications is required. Therefore, we
describe how to combine two promising privacy approaches from both categories,
namely AVARE and PATRON. Evaluation results confirm the thereby achieved
synergy effects.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-15&engl=1}
}
@inproceedings{INPROC-2018-14,
  author = {Corinna Giebler and Christoph Stach and Holger Schwarz and Bernhard Mitschang},
  title = {{BRAID - A Hybrid Processing Architecture for Big Data}},
  booktitle = {Proceedings of the 7th International Conference on Data Science, Technology and Applications (DATA 2018)},
  publisher = {INSTICC Press},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages = {1--8},
  type = {Conference Paper},
  month = {July},
  year = {2018},
  keywords = {Big Data; IoT; Batch Processing; Stream Processing; Lambda Architecture; Kappa Architecture},
  language = {English},
  cr-category = {D.2.11 Software Engineering Software Architectures,
    H.2.4 Database Management Systems,
    H.2.8 Database Applications},
  contact = {Senden Sie eine e-Mail an Corinna.Giebler@ipvs.uni-stuttgart.de},
  department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract = {The Internet of Things is applied in many domains and collects vast amounts of
    data. This data provides access to a lot of knowledge when analyzed
    comprehensively. However, advanced analysis techniques such as predictive or
    prescriptive analytics require access to both, history data, i.e., long-term
    persisted data, and real-time data as well as a joint view on both types of
    data. State-of-the-art hybrid processing architectures for big data - namely,
    the Lambda and the Kappa Architecture - support the processing of history data
    and real-time data. However, they lack of a tight coupling of the two
    processing modes. That is, the user has to do a lot of work manually in order
    to enable a comprehensive analysis of the data. For instance, the user has to
    combine the results of both processing modes or apply knowledge from one
    processing mode to the other. Therefore, we introduce a novel hybrid processing
    architecture for big data, called BRAID. BRAID intertwines the processing of
    history data and real-time data by adding communication channels between the
    batch engine and the stream engine. This enables to carry out comprehensive
    analyses automatically at a reasonable overhead.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-14&engl=1}
}
@inproceedings{INPROC-2018-12,
  author = {Oliver Kopp and Uwe Breitenb{\"u}cher and Tamara M{\"u}ller},
  title = {{CloudRef - Towards Collaborative Reference Management in the Cloud}},
  booktitle = {Proceedings of the 10th Central European Workshop on Services and their Composition (ZEUS 2018)},
  editor = {Nico Herzberg and Christoph Hochreiner and Oliver Kopp and J{\"o}rg Lenhard},
  publisher = {CEUR-WS.org},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  series = {CEUR Workshop Proceedings},
  volume = {2072},
  pages = {63--68},
  type = {Workshop Paper},
  month = {April},
  year = {2018},
  issn = {1613-0073},
  language = {English},
  cr-category = {H.4.1 Office Automation},
  ee = {http://ceur-ws.org/Vol-2072/,
    http://ceur-ws.org/Vol-2072/paper10.pdf},
  department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
    University of Stuttgart, Institute of Architecture of Application Systems},
  abstract = {},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-12&engl=1}
}
@inproceedings {INPROC-2018-11,
author = {Oliver Kopp and Anita Armbruster and Olaf Zimmermann},
title = {{Markdown Architectural Decision Records: Format and Tool Support}},
booktitle = {Proceedings of the 10th Central European Workshop on Services and their Composition (ZEUS 2018)},
editor = {Nico Herzberg and Christoph Hochreiner and Oliver Kopp and J{\"o}rg Lenhard},
publisher = {CEUR-WS.org},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {CEUR Workshop Proceedings},
volume = {2072},
pages = {55--62},
type = {Workshop Paper},
month = {April},
year = {2018},
issn = {1613-0073},
language = {English},
cr-category = {H.4.1 Office Automation},
ee = {http://ceur-ws.org/Vol-2072/,
http://ceur-ws.org/Vol-2072/paper9.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-11&engl=1}
}
@inproceedings {INPROC-2018-10,
author = {Jan K{\"o}nigsberger and Bernhard Mitschang},
title = {{R2SMA - A Middleware Architecture to Access Legacy Enterprise Web Services using Lightweight REST APIs}},
booktitle = {Proceedings of the 20th International Conference on Enterprise Information Systems},
editor = {Slimane Hammoudi and Michal Smialek and Olivier Camp and Joaquim Filipe},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {704--711},
type = {Conference Paper},
month = {March},
year = {2018},
isbn = {978-989-758-298-1},
keywords = {REST; API; SOAP; Web Service; SOA; Enterprise SOA; Architecture},
language = {English},
cr-category = {H.5.4 Hypertext/Hypermedia,
H.3.5 Online Information Services},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-10&engl=1}
}
@inproceedings {INPROC-2018-05,
author = {Christoph Stach and Bernhard Mitschang},
title = {{CURATOR - A Secure Shared Object Store: Design, Implementation, and Evaluation of a Manageable, Secure, and Performant Data Exchange Mechanism for Smart Devices}},
booktitle = {Proceedings of the 33rd ACM/SIGAPP Symposium On Applied Computing (DTTA)},
publisher = {ACM Press},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--8},
type = {Conference Paper},
month = {April},
year = {2018},
keywords = {data exchange; smart devices; shared object store; security},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
D.4.6 Operating Systems Security and Protection},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Nowadays, smart devices have become incredibly popular - literally everybody
has one. Due to an enormous quantity of versatile apps, these devices
positively affect almost every aspect of their users' lives. E.g., there are
apps collecting and monitoring health data from a certain domain such as
diabetes-related or respiration-related data. However, they cannot display
their whole potential since they have only access to their own data and cannot
combine it with data from other apps, e.g., in order to create a comprehensive
electronic health record. On that account, we introduce a seCURe shAred objecT
stORe called CURATOR. In CURATOR apps cannot only manage their own data in an
easy and performant way, but they can also share it with other apps. Since some
of the data is confidential, CURATOR has several security features, including
authentication, fine-grained access control, and encryption. In this paper, we
discuss CURATOR's design and implementation and evaluate its performance.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-05&engl=1}
}
@inproceedings {INPROC-2018-04,
author = {Christoph Stach and Frank D{\"u}rr and Kai Mindermann and Saravana Murthy Palanisamy and Stefan Wagner},
title = {{How a Pattern-based Privacy System Contributes to Improve Context Recognition}},
booktitle = {Proceedings of the 2018 IEEE International Conference on Pervasive Computing and Communications Workshops (CoMoRea)},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--6},
type = {Workshop Paper},
month = {March},
year = {2018},
keywords = {privacy; access control; pattern concealing; stream processing; complex event processing; databases},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
D.4.6 Operating Systems Security and Protection},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Software Technology, Software Engineering;
University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
abstract = {As Smart Devices have access to a lot of user-preferential data, they come in
handy in any situation. Although such data - as well as the knowledge which can
be derived from it - is highly beneficial as apps are able to adapt their
services appropriate to the respective context, it also poses a privacy threat.
Thus, a lot of research work is done regarding privacy. Yet, all approaches
obfuscate certain attributes which has a negative impact on context recognition
and thus service quality. Therefore, we introduce a novel access control
mechanism called PATRON. The basic idea is to control access to information
patterns. For instance, a person suffering from diabetes might not want to
reveal his or her unhealthy eating habit, which can be derived from the pattern
``rising blood sugar level'' -$>$ ``adding bread units''. Such a pattern which must
not be discoverable by some parties (e.g., insurance companies) is called
private pattern whereas a pattern which improves an app's service quality is
labeled as public pattern. PATRON employs different techniques to conceal
private patterns and, in case of available alternatives, selects the one with
the least negative impact on service quality, such that the recognition of
public patterns is supported as well as possible.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-04&engl=1}
}
@inproceedings {INPROC-2018-03,
author = {Christoph Stach and Frank Steimle and Bernhard Mitschang},
title = {{The Privacy Management Platform: An Enabler for Device Interoperability and Information Security in mHealth Applications}},
booktitle = {Proceedings of the 11th International Conference on Health Informatics (HEALTHINF 2018)},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--12},
type = {Conference Paper},
month = {January},
year = {2018},
keywords = {mHealth; Device Interoperability; Information Security; COPD; Privacy Management Platform},
language = {English},
cr-category = {H.5.0 Information Interfaces and Presentation General,
K.6.5 Security and Protection,
K.8 Personal Computing},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Chronic diseases are on the rise. Afflicted patients require persistent therapy
and periodic screenings. This causes high treatment costs and overburdened
physicians. Innovative approaches that enable patients to perform treatment
methods on their own are badly needed. Telemedical approaches with the aid of
modern Smartphones connected to medical devices (the so-called mHealth) can be
the answer. However, mHealth apps face two key challenges, namely device
interoperability and information security. In this paper, we describe how the
Privacy Management Platform (PMP) and its extendable Resources can contribute
to these challenges. Therefore, we analyze a real-world mHealth app and derive
generic functional units, each realizing a certain task recurring frequently
within mHealth apps, e.g., metering, data storage, or data transmission. For
each functional unit we provide a PMP Resource, enabling both, device
interoperability and information security. Finally, we revise the analyzed
mHealth app using the Resources in order to evaluate our approach.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-03&engl=1}
}
@inproceedings {INPROC-2018-02,
author = {Christoph Stach},
title = {{Big Brother is Smart Watching You: Privacy Concerns about Health and Fitness Applications}},
booktitle = {Proceedings of the 4th International Conference on Information Systems Security and Privacy (ICISSP 2018)},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--11},
type = {Conference Paper},
month = {January},
year = {2018},
keywords = {Smartbands; Health and Fitness Applications; Privacy Concerns; Privacy Management Platform},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
D.4.6 Operating Systems Security and Protection,
K.8 Personal Computing},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Health and fitness applications for mobile devices are becoming more and more
popular. Due to novel wearable metering devices, the so-called Smartbands,
these applications are able to capture both health data (e.g., the heart rate)
and personal information (e.g., location data) and create a quantified
self for their users. However, many of these applications violate the user's
privacy and misuse the collected data. It becomes apparent that this threat is
inherent in the privacy systems implemented in mobile platforms. Therefore, we
apply the Privacy Policy Model (PPM), a fine-grained and modularly expandable
permission model, to deal with this problem. We implement our adapted model in
a prototype based on the Privacy Management Platform (PMP). Subsequently, we
evaluate our model with the help of the prototype and demonstrate its
applicability for any application using Smartbands for its data acquisition.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-02&engl=1}
}
@inproceedings {INPROC-2018-01,
author = {Christoph Stach and Bernhard Mitschang},
title = {{ACCESSORS: A Data-Centric Permission Model for the Internet of Things}},
booktitle = {Proceedings of the 4th International Conference on Information Systems Security and Privacy (ICISSP 2018).},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--11},
type = {Conference Paper},
month = {January},
year = {2018},
keywords = {Permission Model; Data-Centric; Derivation Transparent; Fine-Grained; Context-Sensitive; IoT},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
D.4.6 Operating Systems Security and Protection},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The Internet of Things (IoT) is gaining more and more relevance. Due to
innovative IoT devices equipped with novel sensors, new application domains
come up continuously. These domains include Smart Homes, Smart Health, and
Smart Cars among others. As the devices not only collect a lot of data about
the user, but also share this information with each other, privacy is a key
issue for IoT applications. However, traditional privacy systems cannot be
applied to the IoT directly due to different requirements towards the
underlying permission models. Therefore, we analyze existing permission models
regarding their applicability in the IoT domain. Based on this analysis, we
come up with a novel permission model, implement it in a privacy system, and
assess its utility.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2018-01&engl=1}
}
@inproceedings {INPROC-2017-70,
author = {Jan K{\"o}nigsberger and Bernhard Mitschang},
title = {{Business Objects plus (BO+): An Approach to Enhance Service Reuse and Integration in Cross-Domain SOA Compounds}},
booktitle = {Proceedings of the 2017 IEEE International Conference on Information Reuse and Integration},
editor = {Chengcui Zhang and Balaji Palanisamy and Latifur Khan and Sahra Sedigh Sarvestani},
address = {Los Alamitos, Washington, Tokyo},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {49--58},
type = {Conference Paper},
month = {August},
year = {2017},
doi = {10.1109/IRI.2017.28},
isbn = {978-1-5386-1562-1},
keywords = {SOA; data model; abstraction; service interface; business objects},
language = {English},
cr-category = {E.2 Data Storage Representations,
H.3.5 Online Information Services},
ee = {http://ieeexplore.ieee.org/document/8102918/},
contact = {Jan K{\"o}nigsberger jan.koenigsberger@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-70&engl=1}
}
@inproceedings {INPROC-2017-66,
author = {Andreas Bader and Oliver Kopp},
title = {{Towards DBCloudBench - A Scenario-Based Database Benchmarking Framework}},
booktitle = {Proceedings of the 11th Advanced Summer School on Service Oriented Computing},
editor = {Johanna Barzen and Rania Khalaf and Frank Leymann and Bernhard Mitschang},
publisher = {IBM Research Report},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {IBM Research Report},
volume = {RC25670 (WAT1710-015)},
pages = {53--56},
type = {Conference Paper},
month = {October},
year = {2017},
language = {English},
cr-category = {H.4.1 Office Automation},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2017-66/INPROC-2017-66.pdf,
http://domino.watson.ibm.com/library/CyberDig.nsf/1e4115aea78b6e7c85256b360066f0d4/b7ed36faf73949a4852581e7006b2a55!OpenDocument},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Many benchmarks can be used for measuring performance of different types of
databases. To automate the process of benchmarking databases, this paper
outlines DBCloudBench. It can be used to automatically setup a scenario and
perform a benchmark run using a standards-based approach. The databases and
benchmarks are stored in ``cloud services archives'' allowing them to be
reused and combined as necessary. Each benchmark is accompanied with an adapter
for running the benchmark on certain database systems while using DBCloudBench.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-66&engl=1}
}
@inproceedings {INPROC-2017-49,
author = {Eva Hoos and Matthias Wieland and Bernhard Mitschang},
title = {{Analysis Method for Conceptual Context Modeling Applied in Production Environments}},
booktitle = {Proceedings of 20th International Conference on Business Information Systems (BIS)},
publisher = {Springer International Publishing},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {313--325},
type = {Conference Paper},
month = {May},
year = {2017},
keywords = {Context-awareness; production environments; Industry 4.0},
language = {English},
cr-category = {J.1 Administration Data Processing},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-49&engl=1}
}
@inproceedings {INPROC-2017-48,
author = {Eva Hoos and Pascal Hirmer and Bernhard Mitschang},
title = {{Context-Aware Decision Information Packages: An Approach to Human-Centric Smart Factories}},
booktitle = {Proceedings of the 21st European Conference on Advances in Databases and Information Systems (ADBIS)},
publisher = {Springer International Publishing AG 2017},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {42--56},
type = {Conference Paper},
month = {August},
year = {2017},
keywords = {Industry 4.0; Context-awareness; Data Provisioning; Smart Factory},
language = {English},
cr-category = {H.3.3 Information Search and Retrieval},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-48&engl=1}
}
@inproceedings {INPROC-2017-41,
author = {Matthias Wieland and Frank Steimle and Bernhard Mitschang and Dominik Lucke and Peter Einberger and Daniel Schel and Michael Luckert and Thomas Bauernhansl},
title = {{Rule-Based Integration of Smart Services Using the Manufacturing Service Bus}},
booktitle = {Proceedings of 14th IEEE International Conference on Ubiquitous Intelligence and Computing (UIC2017)},
address = {Fremont, USA},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--8},
type = {Conference Paper},
month = {August},
year = {2017},
doi = {10.1109/UIC-ATC.2017.8397529},
keywords = {Rules; Integration; Manufacturing; Smart Factory; Industrie 4.0; Manufacturing Service Bus},
language = {English},
cr-category = {H.4.0 Information Systems Applications General,
I.2.1 Applications and Expert Systems},
ee = {https://ieeexplore.ieee.org/document/8397529/},
contact = {Senden Sie eine E-Mail an Frank.Steimle@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Factories have to adapt permanently to changing situations in order to stay
competitive. Premise to achieve this objective is up-to-date information on all
levels of a factory and during the product life cycle, so that men and machine
can optimize their activities according to their tasks. One approach to
implement this economically is the massive application of sensors and
information and communication technologies (ICT) leading to a Smart Factory.
This process and related applications are summarized under the term of the
fourth industrial revolution (Industrie 4.0). It demands a flexible and
easy-to-use integration of assets on the shop floor with ICT systems. The
contribution of this paper is the MIALinx system that enables all these steps.
The steps range from the integration to the sensing and analyzing of the sensor
data to the execution of required actions. Furthermore, MIALinx provides an
abstract rule based approach for users to model the behavior of the system. The
presented system is based on concepts and technologies of the Internet of
Things and service-oriented middleware. The main users targeted with our system
are small and medium-sized enterprises that do not have the expertise or the
investment possibilities to invest in completely new Industrie 4.0 systems but
rather use their existing production assets and enrich them to achieve
Industrie 4.0 capability.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-41&engl=1}
}
@inproceedings {INPROC-2017-40,
author = {Eva Hoos and Pascal Hirmer and Bernhard Mitschang},
title = {{Improving Problem Resolving on the Shop Floor by Context-Aware Decision Information Packages}},
booktitle = {Proceedings of the CAiSE 2017 Forum},
editor = {Xavier Franch and Jolita Ralyt{\'e}},
address = {Essen},
publisher = {CEUR Workshop Proceedings},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {121--128},
type = {Workshop Paper},
month = {June},
year = {2017},
keywords = {Industry 4.0; Context-Awareness; Engineering},
language = {English},
cr-category = {J.1 Administration Data Processing},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-40&engl=1}
}
@inproceedings {INPROC-2017-38,
author = {Christian Weber and Jan K{\"o}nigsberger and Laura Kassner and Bernhard Mitschang},
title = {{M2DDM -- A Maturity Model for Data-Driven Manufacturing}},
booktitle = {Manufacturing Systems 4.0 -- Proceedings of the 50th CIRP Conference on Manufacturing Systems (CIRP CMS); Taichung, Taiwan, May 3--5, 2017},
editor = {Mitchell M. Tseng and Hung-Yin Tsai and Yue Wang},
publisher = {Elsevier},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Procedia CIRP},
volume = {63},
pages = {173--178},
type = {Conference Paper},
month = {July},
year = {2017},
doi = {10.1016/j.procir.2017.03.309},
issn = {2212-8271},
keywords = {Maturity Model; Industrie 4.0; Industrial Internet; Reference Architectures; Digital Twin; Edge Analytics},
language = {English},
cr-category = {H.1.0 Information Systems Models and Principles General,
H.4.0 Information Systems Applications General},
ee = {http://www.sciencedirect.com/science/article/pii/S2212827117304973},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-38&engl=1}
}
@inproceedings {INPROC-2017-32,
author = {Christoph Stach and Frank D{\"u}rr and Kai Mindermann and Saravana Murthy Palanisamy and Muhammad Adnan Tariq and Bernhard Mitschang and Stefan Wagner},
title = {{PATRON - Datenschutz in Datenstromverarbeitungssystemen}},
booktitle = {Informatik 2017: Digitale Kulturen, Tagungsband der 47. Jahrestagung der Gesellschaft f{\"u}r Informatik e.V. (GI), 25.09. - 29.09.2017, Technische Universit{\"a}t Chemnitz},
publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {LNI},
pages = {1--12},
type = {Workshop Paper},
month = {September},
year = {2017},
keywords = {Datenschutz; Zugriffskontrolle; Datenstr{\"o}me; Internet der Dinge; Privatheit; Sensoren},
language = {German},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
D.4.6 Operating Systems Security and Protection},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Software Technology, Software Engineering;
University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
abstract = {Angetrieben durch die stetig voranschreitende Digitalisierung gewinnt das
Internet der Dinge (engl. IoT) immer mehr an Bedeutung. Im IoT werden
technische Ger{\"a}te mit unterschiedlichen Sensoren ausgestattet und miteinander
vernetzt. Dadurch werden neuartige Anwendungen beispielsweise im Bereich
E-Health erm{\"o}glicht, in denen Sensordaten miteinander kombiniert und so in
h{\"o}herwertige Informationen umgewandelt werden. Die von diesen Anwendungen
abgeleiteten Informationen verraten viel {\"u}ber den Nutzer und m{\"u}ssen daher
besonders gesch{\"u}tzt werden. H{\"a}ufig hat der Nutzer allerdings keine Kontrolle
{\"u}ber die Verarbeitung seiner Daten, ganz davon zu schweigen, dass er das Ausma{\ss}
und die Art der daraus ableitbaren Informationen nicht ermessen kann.
In diesem Artikel stellen wir daher einen neuartigen Kontrollmechanismus vor,
der private Informationen im IoT sch{\"u}tzt. Anstelle von abstrakten
Datenschutzregeln f{\"u}r einzelne Sensoren definiert der Nutzer Muster, die es zu
sch{\"u}tzen gilt. Ein Muster kann beispielsweise eine Kombination aus Messwerten
sein, die auf eine bestimmte Krankheit schlie{\ss}en lassen. Der Nutzer definiert
die zu verheimlichenden Informationen nat{\"u}rlichsprachlich, und ein
Dom{\"a}nenexperte setzt diese in formale Regeln um. Sind diese Regeln zu
restriktiv, so kann die Anwendung ihre angedachte Funktionalit{\"a}t nicht
erbringen. Daher muss bez{\"u}glich der Servicequalit{\"a}t ein Kompromiss zwischen
gew{\"u}nschter Privatheit und ben{\"o}tigter Funktionalit{\"a}t gefunden werden.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-32&engl=1}
}
@inproceedings {INPROC-2017-28,
author = {Ana Cristina Franco da Silva and Uwe Breitenb{\"u}cher and Pascal Hirmer and K{\'a}lm{\'a}n K{\'e}pes and Oliver Kopp and Frank Leymann and Bernhard Mitschang and Ronald Steinke},
title = {{Internet of Things Out of the Box: Using TOSCA for Automating the Deployment of IoT Environments}},
booktitle = {Proceedings of the 7th International Conference on Cloud Computing and Services Science (CLOSER)},
editor = {Donald Ferguson and V{\'\i}ctor M{\'e}ndez Mu{\~n}oz and Jorge Cardoso and Markus Helfert and Claus Pahl},
publisher = {SciTePress Digital Library},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {ScitePress},
volume = {1},
pages = {358--367},
type = {Conference Paper},
month = {June},
year = {2017},
isbn = {978-989-758-243-1},
doi = {10.5220/0006243303580367},
keywords = {Internet of Things; TOSCA; Application Deployment; Device Software},
language = {English},
cr-category = {K.6 Management of Computing and Information Systems,
D.2.12 Software Engineering Interoperability},
ee = {http://scitepress.org/DigitalLibrary/PublicationsDetail.aspx?ID=AuNrRtS4cNc=&t=1},
contact = {franco-da-silva@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-28&engl=1}
}
@inproceedings {INPROC-2017-27,
author = {C. Timurhan Sungur and Uwe Breitenb{\"u}cher and Oliver Kopp and Frank Leymann and Andreas Wei{\ss}},
title = {{Identifying Relevant Resources and Relevant Capabilities of Informal Processes}},
booktitle = {Proceedings of the 19th International Conference on Enterprise Information Systems (ICEIS 2017)},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {295--307},
type = {Conference Paper},
month = {April},
year = {2017},
keywords = {Informal Processes; Unstructured Processes; Resource Discovery; Capability Discovery; Relevant Resources; Relevant Capabilities},
language = {English},
cr-category = {H.4.1 Office Automation,
H.3.3 Information Search and Retrieval,
H.3.4 Information Storage and Retrieval Systems and Software,
H.5.3 Group and Organization Interfaces},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-27&engl=1}
}
@inproceedings {INPROC-2017-26,
author = {Corinna Giebler and Christoph Stach},
title = {{Datenschutzmechanismen f{\"u}r Gesundheitsspiele am Beispiel von Secure Candy Castle}},
booktitle = {Tagungsband der 15. GI-Fachtagung Datenbanksysteme f{\"u}r Business, Technologie und Web},
editor = {Bernhard Mitschang and Daniela Nicklas and Frank Leymann and Harald Sch{\"o}ning and Melanie Herschel and Jens Teubner and Theo H{\"a}rder and Oliver Kopp and Matthias Wieland},
publisher = {GI},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Informatics (LNI)},
volume = {265},
pages = {311--320},
type = {Conference Paper},
month = {March},
year = {2017},
isbn = {978-3-88579-659-6},
keywords = {mHealth-Apps; Datensicherheit; Datenschutz; Datenintegration; Interoperabilit{\"a}t},
language = {German},
cr-category = {J.3 Life and Medical Sciences,
K.4.1 Computers and Society Public Policy Issues},
ee = {http://btw2017.informatik.uni-stuttgart.de/?pageId=Proceedings&language=de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Smartphones sind mittlerweile ein fester Bestandteil des modernen Lebens. Sie
erzeugen, speichern und verarbeiten eine Vielzahl an privaten Daten. Auch im
Gesundheitssektor werden sie zunehmend eingesetzt; die dabei entstehenden Daten
sind besonders sch{\"u}tzenswert. In dieser Arbeit werden daher Konzepte
eingef{\"u}hrt, die Nutzern die Kontrolle {\"u}ber ihre Gesundheitsdaten geben. Zu
diesem Zweck wird Secure Candy Castle, ein Spiel f{\"u}r Kinder mit einer
Diabeteserkrankung, das mit einem Berechtigungsmanagementsystem verbunden ist,
vorgestellt. Der Nutzer kann den Funktionsumfang des Spiels einschr{\"a}nken,
wodurch die App weniger Berechtigungen erh{\"a}lt. Zus{\"a}tzlich werden f{\"u}r SCC
Komponenten entwickelt, die die Interoperabilit{\"a}t von Smartphones mit
medizinischen Messger{\"a}ten verbessert. Die Evaluation zeigt, dass mit SCC alle
aktuellen Probleme von Gesundheits-Apps adressiert werden. Die Konzepte sind
generisch und lassen sich auf beliebige andere Gesundheits-Apps anwenden.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-26&engl=1}
}
@inproceedings {INPROC-2017-25,
author = {Karoline Saatkamp and Uwe Breitenb{\"u}cher and Oliver Kopp and Frank Leymann},
title = {{Topology Splitting and Matching for Multi-Cloud Deployments}},
booktitle = {Proceedings of the 7th International Conference on Cloud Computing and Services Science (CLOSER 2017)},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {247--258},
type = {Conference Paper},
month = {April},
year = {2017},
isbn = {978-989-758-243-1},
keywords = {Application Deployment; Distribution; Splitting; Cloud Computing; TOSCA},
language = {English},
cr-category = {G.0 Mathematics of Computing General,
H.0 Information Systems General},
ee = {http://closer.scitevents.org},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {For automating the deployment of applications in cloud environments, a variety
of deployment automation technologies have been developed in recent years.
These technologies enable specifying the desired deployment in the form of
deployment models, which can be automatically executed. However, changing
internal or external conditions often lead to strategical decisions that must
be reflected in all deployment models of a company's IT. Unfortunately, while
creating such deployment models is difficult, adapting them is even harder as
typically a variety of technologies must be replaced. In this paper, we present
the Split and Match Method that enables splitting a deployment model following
a manually specified distribution on the business layer. The method also
enables automatically deploying the resulting model without the need for a
manual intervention and, thus, significantly eases reflecting strategical
decisions on the technical deployment layer. We present a formalization and
algorithms to automate the steps of the method. Moreover, we validate the
practical feasibility of the presented concepts by a prototype based on the
TOSCA standard and the OpenTOSCA ecosystem.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-25&engl=1}
}
@inproceedings {INPROC-2017-21,
author = {Tim Waizenegger and Frank Wagner and Cataldo Mega},
title = {{SDOS: Using Trusted Platform Modules for Secure Cryptographic Deletion in the Swift Object Store}},
booktitle = {Proc. 20th International Conference on Extending Database Technology (EDBT), March 21-24, 2017 - Venice, Italy},
publisher = {OpenProceedings.org},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {550--553},
type = {Demonstration},
month = {January},
year = {2017},
isbn = {978-3-89318-073-8},
keywords = {secure data deletion; cryptographic deletion; data erasure; records management; retention management; key management; data shredding; trusted platform module; TPM},
language = {English},
cr-category = {E.3 Data Encryption},
ee = {http://openproceedings.org/2017/conf/edbt/paper-408.pdf},
contact = {tim.waizenegger@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-21&engl=1}
}
@inproceedings{INPROC-2017-20,
  author      = {Tim Waizenegger},
  title       = {{Secure Cryptographic Deletion in the Swift Object Store}},
  booktitle   = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2017), 17. Fachtagung des GI-Fachbereichs Datenbanken und Informationssysteme (DBIS), 6.-10. M{\"a}rz 2017, Stuttgart, Germany, Proceedings},
  publisher   = {GI},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages       = {625--628},
  type        = {Conference Paper},
  month       = {January},
  year        = {2017},
  isbn        = {978-3-88579-659-6},
  language    = {English},
  cr-category = {E.3 Data Encryption},
  ee          = {http://btw2017.informatik.uni-stuttgart.de/slidesandpapers/I-18-29/paper_web.pdf},
  contact     = {tim.waizenegger@ipvs.uni-stuttgart.de},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-20&engl=1},
}
@inproceedings {INPROC-2017-19,
author = {Tim Waizenegger},
title = {{BTW 2017 Data Science Challenge (SDSC17)}},
booktitle = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2017), 17. Fachtagung des GI-Fachbereichs Datenbanken und Informationssysteme (DBIS), 6.-10. M{\"a}rz 2017, Stuttgart, Germany, Workshopband},
publisher = {GI},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {405--406},
type = {Conference Paper},
month = {January},
year = {2017},
isbn = {978-3-88579-660-2},
language = {German},
cr-category = {A.0 General Literature, General},
ee = {http://dblp.uni-trier.de/rec/bib/conf/btw/Waizenegger17a,
https://www.gi.de/service/publikationen/lni/gi-edition-proceedings-2017/gi-edition-lecture-notes-in-informatics-lni-p-266.html},
contact = {tim.waizenegger@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Im Rahmen der Data Science Challenge haben Studierende und Doktoranden die
M{\"o}glichkeit, einen eigenen Ansatz zur Cloud-basierten Datenanalyse zu
entwickeln und damit gegen andere Teilnehmer anzutreten. Auf der BTW2017 in
Stuttgart pr{\"a}sentieren die Teilnehmer ihre Ergebnisse, die von einer Fachjury
aus Forschung und Industrie bewertet werden. Die Gewinner, sowie die
N{\"a}chstplatzierten, werden mit einem Preisgeld honoriert.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-19&engl=1}
}
@inproceedings {INPROC-2017-18,
author = {Cornelia Kiefer},
title = {{Die Gratwanderung zwischen qualitativ hochwertigen und einfach zu erstellenden dom{\"a}nenspezifischen Textanalysen}},
booktitle = {GI-Edition Lecture Notes in Informatics Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2017) Workshopband},
editor = {Mitschang, Bernhard and others},
address = {Bonn},
publisher = {Gesellschaft f{\"u}r Informatik},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {99--104},
type = {Workshop Paper},
month = {March},
year = {2017},
keywords = {Textanalyse, Datenqualit{\"a}t, Analysequalit{\"a}t, {\"u}berwachte maschinelle Lernverfahren, Textanalyse in den Geisteswissenschaften},
language = {German},
cr-category = {H.3 Information Storage and Retrieval},
ee = {http://btw2017.informatik.uni-stuttgart.de/pro/P-266-BTW2017-Workshopband.pdf,
http://btw2017.informatik.uni-stuttgart.de/slidesandpapers/E1-12/paper_web.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Die Textanalyse ist zu einem entscheidenden Werkzeug in verschiedenen Dom{\"a}nen
wie den Geisteswissenschaften, Naturwissenschaften sowie auch in der Industrie
geworden. Eine der gr{\"o}{\ss}ten Herausforderungen bei dom{\"a}nenspezifischen
Textanalyseprojekten besteht darin, das Wissen aus den Bereichen IT und Text
Mining mit dem Wissen aus der Dom{\"a}ne zusammenzubringen. Viele
Textanalysetoolkits werden deshalb speziell f{\"u}r den Gebrauch durch
Dom{\"a}nenexperten ohne oder mit wenig IT und Textanalysewissen vereinfacht. In
diesem Beitrag diskutieren wir, inwiefern diese Vereinfachungen zu
Qualit{\"a}tsproblemen bei der Analyse von unsauberen Daten f{\"u}hren k{\"o}nnen.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-18&engl=1}
}
@inproceedings {INPROC-2017-13,
author = {Marc H{\"u}ffmeyer and Pascal Hirmer and Bernhard Mitschang and Ulf Schreier and Matthias Wieland},
title = {{SitAC -- A System for Situation-aware Access Control - Controlling Access to Sensor Data}},
booktitle = {Proceedings of the 3rd International Conference on Information Systems Security and Privacy},
editor = {Paolo Mori and Steven Furnell and Olivier Camp},
address = {Porto, Portugal},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {ScitePress},
volume = {1},
pages = {113--125},
type = {Conference Paper},
month = {March},
year = {2017},
isbn = {978-989-758-209-7},
keywords = {Authorization; Attribute based Access Control; Situation-awareness; REST; Internet of Things},
language = {English},
cr-category = {J.6 Computer-Aided Engineering,
H.3.1 Content Analysis and Indexing},
ee = {http://www.scitepress.org/DigitalLibrary/PublicationsDetail.aspx?ID=PZW1ep7OUUk%3d&t=1},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-13&engl=1}
}
@inproceedings{INPROC-2017-12,
  author      = {Christian Endres and Uwe Breitenb{\"u}cher and Michael Falkenthal and Oliver Kopp and Frank Leymann and Johannes Wettinger},
  title       = {{Declarative vs. Imperative: Two Modeling Patterns for the Automated Deployment of Applications}},
  booktitle   = {Proceedings of the 9th International Conference on Pervasive Patterns and Applications (PATTERNS)},
  publisher   = {Xpert Publishing Services},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages       = {22--27},
  type        = {Conference Paper},
  month       = {February},
  year        = {2017},
  isbn        = {978-1-61208-534-0},
  keywords    = {Modeling Patterns; Application Deployment and Management; Automation; Cloud Computing},
  language    = {English},
  cr-category = {C.0 Computer Systems Organization, General,
                 D.2.9 Software Engineering Management,
                 D.2.13 Software Engineering Reusable Software},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
                 University of Stuttgart, Institute of Architecture of Application Systems},
  abstract    = {In the field of cloud computing, the automated deployment of applications is of
                 vital importance and supported by diverse management technologies. However,
                 currently there is no systematic knowledge collection that points out
                 commonalities, capabilities, and differences of these approaches. This paper
                 aims at identifying common modeling principles employed by technologies to
                 create automatically executable models that describe the deployment of
                 applications. We discuss two fundamental approaches for modeling the automated
                 deployment of applications: imperative procedural models and declarative
                 models. For these two approaches, we identified (i) basic pattern primitives
                 and (ii) documented these approaches as patterns that point out frequently
                 occurring problems in certain contexts including proven modeling solutions. The
                 introduced patterns foster the understanding of common application deployment
                 concepts, are validated regarding their occurrence in established
                 state-of-the-art technologies, and enable the transfer of that knowledge.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-12&engl=1},
}
@inproceedings {INPROC-2017-11,
author = {Pascal Hirmer},
title = {{Effizienz-Optimierung daten-intensiver Data Mashups am Beispiel von Map-Reduce}},
booktitle = {Proceedings der Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW), 17. Fachtagung des GI-Fachbereichs, Workshopband},
editor = {Bernhard Mitschang and Norbert Ritter and Holger Schwarz and Meike Klettke and Andreas Thor and Oliver Kopp and Matthias Wieland},
address = {Stuttgart},
publisher = {Gesellschaft f{\"u}r Informatik (GI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {LNI},
volume = {P-266},
pages = {111--116},
type = {Conference Paper},
month = {March},
year = {2017},
isbn = {978-3-88579-660-2},
keywords = {Data Mashups; Map-Reduce; Big Data; Effizienzoptimierung},
language = {German},
cr-category = {E.0 Data General,
H.2 Database Management,
H.3 Information Storage and Retrieval,
H.4 Information Systems Applications},
ee = {http://btw2017.informatik.uni-stuttgart.de/slidesandpapers/E1-14/paper_web.pdf},
contact = {pascal.hirmer@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Data Mashup-Ans{\"a}tze und -Tools bieten einen einfachen und schnellen Weg, um
Daten zu verarbeiten und zu analysieren. {\"U}ber eine grafische Oberfl{\"a}che k{\"o}nnen
dabei -- in der Regel grafisch -- Datenquellen und Datenoperationen sowie der
Datenfluss einfach modelliert werden. Hierdurch ergeben sich vor allem Vorteile
durch einfache Bedienbarkeit durch Dom{\"a}nennutzer sowie einer explorativen
Vorgehensweise. Jedoch legen vorhandene Data Mashup-Ans{\"a}tze und -Tools wenig
Wert auf die Effizienz der Ausf{\"u}hrung, was dadurch begr{\"u}ndet wird, dass durch
Data Mashups in der Regel kleine Datenmengen verarbeitet werden. Zu Zeiten von
Big Data gilt dies jedoch nicht mehr; schon scheinbar kleine Szenarien
enthalten oftmals eine Vielzahl an Daten. Um mit diesem Problem zuk{\"u}nftig
umzugehen, stellen wir in diesem Paper eine Konzeptidee am Beispiel von
Map-Reduce vor, mit der die Ausf{\"u}hrung von Data Mashups bzgl. Effizienz
optimiert werden kann.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-11&engl=1}
}
@inproceedings {INPROC-2017-06,
author = {Andreas Bader and Oliver Kopp and Michael Falkenthal},
title = {{Survey and Comparison of Open Source Time Series Databases}},
booktitle = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW2017) -- Workshopband},
editor = {Bernhard Mitschang and Norbert Ritter and Holger Schwarz and Meike Klettke and Andreas Thor and Oliver Kopp and Matthias Wieland},
publisher = {K{\"o}llen Druck+Verlag GmbH},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Informatics (LNI)},
volume = {P-266},
pages = {249--268},
type = {Workshop Paper},
month = {March},
year = {2017},
isbn = {978-3-88579-660-2},
issn = {1617-5468},
language = {English},
cr-category = {H.2.4 Database Management Systems,
H.3.4 Information Storage and Retrieval Systems and Software,
C.2.4 Distributed Systems},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2017-06/INPROC-2017-06.pdf,
http://btw2017.informatik.uni-stuttgart.de/},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-06&engl=1}
}
@inproceedings {INPROC-2017-05,
author = {Laura Kassner and Pascal Hirmer and Matthias Wieland and Frank Steimle and Jan K{\"o}nigsberger and Bernhard Mitschang},
title = {{The Social Factory: Connecting People, Machines and Data in Manufacturing for Context-Aware Exception Escalation}},
booktitle = {Proceedings of the 50th Hawaii International Conference on System Sciences},
publisher = {Online},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--10},
type = {Conference Paper},
month = {January},
year = {2017},
isbn = {978-0-9981331-0-2},
keywords = {decision support; internet of things; smart manufacturing; social media; text analytics},
language = {English},
cr-category = {E.0 Data General,
H.2 Database Management,
H.3 Information Storage and Retrieval,
H.4 Information Systems Applications},
ee = {http://hdl.handle.net/10125/41355},
contact = {pascal.hirmer@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Manufacturing environments are socio-technical systems where people have to
interact with machines to achieve a common goal. The goal of the fourth
industrial revolution is to improve their flexibility for mass customization
and rapidly changing production conditions. As a contribution towards this
goal, we introduce the Social Factory: a social network with a powerful
analytics backend to improve the connection between the persons working in
the production environment, the manufacturing machines, and the data that is
created in the process. We represent machines, people and chatbots for
information provisioning as abstract users in the social network. We enable
natural language based communication between them and provide a rich
knowledge base and automated problem solution suggestions. Access to complex
production environments thus becomes intuitive, cooperation among users
improves and problems are resolved more easily.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-05&engl=1}
}
@inproceedings {INPROC-2017-04,
author = {Matthias Wieland and Pascal Hirmer and Frank Steimle and Christoph Gr{\"o}ger and Bernhard Mitschang and Eike Rehder and Dominik Lucke and Omar Abdul Rahman and Thomas Bauernhansl},
title = {{Towards a Rule-Based Manufacturing Integration Assistant}},
booktitle = {Proceedings of the 49th CIRP Conference on Manufacturing Systems (CIRP-CMS 2016); Stuttgart, Germany, May 25-27, 2016},
editor = {Engelbert Westk{\"a}mper and Thomas Bauernhansl},
publisher = {Elsevier},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Procedia CIRP},
volume = {57},
pages = {213--218},
type = {Conference Paper},
month = {January},
year = {2017},
doi = {10.1016/j.procir.2016.11.037},
keywords = {Rules; Integration; Manufacturing; Smart-Factory; Industrie 4.0},
language = {English},
cr-category = {H.4.0 Information Systems Applications General,
J.2 Physical Sciences and Engineering,
I.2.1 Applications and Expert Systems,
I.2.4 Knowledge Representation Formalisms and Methods},
ee = {http://www.sciencedirect.com/science/article/pii/S221282711631191X},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Recent developments and steadily declining prices in ICT enable an economic
application of advanced digital tools in wide areas of manufacturing. Solutions
based on concepts and technologies of the Internet of Things or Cyber Physical
Systems can be used to implement monitoring as well as self-organization of
production, maintenance or logistics processes. However, integration of new
digital tools in existing heterogeneous manufacturing IT systems and
integration of machines and devices into manufacturing environments is an
expensive and tedious task. Therefore, integration issues on IT and
manufacturing level significantly prevent agile manufacturing. Especially small
and medium-sized enterprises do not have the expertise or the investment
possibilities to realize such an integration. To tackle this issue, we present
the approach of the Manufacturing Integration Assistant - MIALinx. The
objective is to develop and implement a lightweight and easy-to-use integration
solution for small and medium-sized enterprises based on recent web automation
technologies. MIALinx aims to simplify the integration using simple
programmable, flexible and reusable IF-THEN rules that connect occurring
situations in manufacturing, such as a machine break down, with corresponding
actions, e.g., an automatic maintenance order generation. For this purpose,
MIALinx connects sensors and actuators based on defined rules whereas the rule
set is defined in a domain-specific, easy-to-use manner to enable rule modeling
by domain experts. Through the definition of rule sets, the workers'
knowledge can be also externalized. Using manufacturing-approved cloud
computing technologies, we enable robustness, security, and a low-effort,
low-cost integration of MIALinx into existing manufacturing environments to
provide advanced digital tools also for small and medium-sized enterprises.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-04&engl=1}
}
@inproceedings {INPROC-2016-54,
author = {Henri Tokola and Christoph Gr{\"o}ger and Eeva J{\"a}rvenp{\"a}{\"a} and Esko Niemi},
title = {{Designing Manufacturing Dashboards on the Basis of a Key Performance Indicator Survey}},
booktitle = {Proceedings of the 49th CIRP Conference on Manufacturing Systems (CIRP CMS)},
publisher = {Elsevier},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Procedia CIRP},
volume = {57},
pages = {619--624},
type = {Conference Paper},
month = {May},
year = {2016},
keywords = {Dashboards; Key Performance Indicators (KPIs); Scorecard},
language = {English},
cr-category = {J.1 Administration Data Processing},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Target-oriented and real-time information provisioning across all hierarchy
levels, from shop floor to top floor, is an important success factor for
manufacturing companies to facilitate agile and efficient manufacturing. In
general, dashboards -- in terms of digital single-screen displays -- address this
challenge and support intuitive monitoring and visualisation of business
performance information. Yet, existing dashboard research mainly focus on IT
issues and lack a systematic study of the dashboard content. To address this
gap, in this paper, we design three representative dashboards for manufacturing
companies based on a comprehensive survey that focuses on suitable key
performance indicators for different manufacturing target groups. The paper
consists of three parts. First, the paper provides a literature review about
design principles of dashboards. Second, it publishes the results of a survey
of manufacturing companies on preferred key performance indicators (KPIs) for
dashboards and the use of dashboards. Third, using the results obtained from
the survey, three representative manufacturing dashboards are designed: an
operational dashboard for workers, a tactical dashboard for managers and a
strategy dashboard for executives. The results underline that different KPIs
are preferred for dashboards on different hierarchy levels and that mobile
usage of dashboards, especially on tablet pcs, is favoured.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-54&engl=1}
}
@inproceedings {INPROC-2016-42,
author = {Frank Steimle and Matthias Wieland},
title = {{ECHO -- An mHealth Solution to Support Treatment of Chronic Patients}},
booktitle = {Proceedings of the 8th Central European Workshop on Services and their Composition, ZEUS 2016},
editor = {Christoph Hochreiner and Stefan Schulte},
publisher = {CEUR Workshop Proceedings},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {64--67},
type = {Demonstration},
month = {February},
year = {2016},
keywords = {mHealth; eHealth; Monitoring; Cloud Computing; Analysis},
language = {English},
cr-category = {C.2.4 Distributed Systems,
H.2.8 Database Applications,
J.3 Life and Medical Sciences},
ee = {http://ceur-ws.org/Vol-1562/},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {More and more people all over the world suffer from chronic diseases, like
asthma. The German-Greek bilateral research project Enhancing Chronic Patients
Health Online developed online services for physicians and patients for use on
smart phones or web browsers, in order to improve monitoring of those patients
and to be able to detect possible exacerbations earlier. During the project we
have developed smart phone applications and websites for both patients and
physicians and a cloud-based health data management system. This demonstration
shows how our system supports physicians and patients.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-42&engl=1}
}
@inproceedings {INPROC-2016-39,
author = {Ana Cristina Franco da Silva and Uwe Breitenb{\"u}cher and K{\'a}lm{\'a}n K{\'e}pes and Oliver Kopp and Frank Leymann},
title = {{OpenTOSCA for IoT: Automating the Deployment of IoT Applications based on the Mosquitto Message Broker}},
booktitle = {Proceedings of the 6th International Conference on the Internet of Things (IoT)},
address = {Stuttgart},
publisher = {ACM},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {181--182},
type = {Demonstration},
month = {November},
year = {2016},
isbn = {978-1-4503-4814-0},
doi = {10.1145/2991561.2998464},
keywords = {Internet of Things; Cyber-Physical Systems; Sensor Integration; Message Broker; Mosquitto; MQTT; TOSCA},
language = {English},
cr-category = {K.6 Management of Computing and Information Systems,
D.2.12 Software Engineering Interoperability},
contact = {For questions, feel free to contact me franco-da-silva@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {Automating the deployment of IoT applications is a complex challenge,
especially if multiple heterogeneous sensors, actuators, and business
components have to be integrated. This demonstration paper presents a generic,
standards-based system that is able to fully automatically deploy IoT
applications based on the TOSCA standard, the standardized MQTT messaging
protocol, the Mosquitto message broker, and the runtime environment OpenTOSCA.
We describe a demonstration scenario and explain in detail how this scenario
can be deployed fully automatically using the mentioned technologies.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-39&engl=1}
}
@inproceedings {INPROC-2016-35,
author = {Jan K{\"o}nigsberger and Bernhard Mitschang},
title = {{A Semantically-enabled SOA Governance Repository}},
booktitle = {Proceedings of the 2016 IEEE 17th International Conference on Information Reuse and Integration},
editor = {{IEEE Computer Society}},
address = {Los Alamitos, California, Washington, Tokyo},
publisher = {IEEE Computer Society Conference Publishing Services},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {423--432},
type = {Conference Paper},
month = {August},
year = {2016},
isbn = {978-1-5090-3207-5},
keywords = {SOA; Governance; Repository; Semantic Web},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
H.3.5 Online Information Services,
I.2.4 Knowledge Representation Formalisms and Methods},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Companies in today's world need to cope with an ever greater need for flexible
and agile IT systems to keep up with the competition and rapidly changing
markets. This leads to increasingly complex system landscapes that are often
realized using service-oriented architectures (SOA). Companies often struggle
to handle the complexity and the governance activities necessary after this
paradigm shift.
We therefore present a semantically-enabled SOA Governance Repository as the
central tool to manage and govern all SOA-related activities within a company.
This repository is based on our previously defined key governance aspects as
well as our SOA Governance Meta Model (SOA-GovMM). We describe how our
repository is able to support and improve the speed and flexibility of
company's IT processes.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-35&engl=1}
}
@inproceedings {INPROC-2016-30,
author = {Pascal Hirmer},
title = {{Flexible Execution and Modeling of Data Processing and Integration Flows}},
booktitle = {Proceedings of the 10th Advanced Summer School on Service Oriented Computing},
editor = {Johanna Barzen and Rania Khalaf and Frank Leymann and Bernhard Mitschang},
publisher = {IBM Research Report},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {26--40},
type = {Conference Paper},
month = {September},
year = {2016},
keywords = {Big Data; Data Integration; Data Flows; Pipes and Filters},
language = {English},
cr-category = {E.0 Data General,
E.1 Data Structures,
H.1 Models and Principles},
ee = {http://domino.research.ibm.com/library/cyberdig.nsf/papers/EC7D5D883519DC7E85258035004DBD19/$File/rc25624.pdf},
contact = {Pascal.Hirmer@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Today, the amount of data highly increases within all domains due to cheap
hardware, fast network connections, and an increasing digitization. Deriving
information and, as a consequence, knowledge from this huge amount of data is a
complex task. Data sources are oftentimes very heterogeneous, dynamic, and
distributed. This makes it difficult to extract, transform, process and
integrate data, which is necessary to gain this knowledge. Furthermore,
extracting knowledge oftentimes requires technical experts with the necessary
skills to conduct the required techniques. For my PhD thesis, I am working on a
new and improved approach for data extraction, processing, and integration by:
(i) facilitating the definition and processing of data processing and
integration scenarios through graphical creation of flow models, (ii) enabling
an ad-hoc, iterative and explorative approach to receive high-quality results,
and (iii) a flexible execution of the data processing tailor-made for users'
non-functional requirements. By providing these means, I enable a more flexible
data processing by a wider range of users, not only limited to technical
experts. This paper describes the approach of the thesis as well as the
publications until today.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-30&engl=1}
}
@inproceedings{INPROC-2016-29,
  author      = {Cornelia Kiefer},
  title       = {{Assessing the Quality of Unstructured Data: An Initial Overview}},
  booktitle   = {Proceedings of the LWDA 2016 Proceedings (LWDA)},
  editor      = {Ralf Krestel and Davide Mottin and Emmanuel M{\"u}ller},
  address     = {Aachen},
  publisher   = {CEUR Workshop Proceedings},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages       = {62--73},
  type        = {Conference Paper},
  month       = {September},
  year        = {2016},
  issn        = {1613-0073},
  keywords    = {quality of unstructured data, quality of text data, data, quality dimensions, data quality assessment, data quality metrics},
  language    = {English},
  cr-category = {A.1 General Literature, Introductory and Survey,
    I.2.7 Natural Language Processing},
  ee          = {http://ceur-ws.org/Vol-1670/paper-25.pdf,
    http://ceur-ws.org/Vol-1670/},
  contact     = {cornelia.kiefer@gsame.uni-stuttgart.de},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {In contrast to structured data, unstructured data such as texts, speech, videos
    and pictures do not come with a data model that enables a computer to use them
    directly. Nowadays, computers can interpret the knowledge encoded in
    unstructured data using methods from text analytics, image recognition and
    speech recognition. Therefore, unstructured data are used increasingly in
    decision-making processes. But although decisions are commonly based on
    unstructured data, data quality assessment methods for unstructured data are
    lacking. We consider data analysis pipelines built upon two types of data
    consumers, human consumers that usually come at the end of the pipeline and
    non-human / machine consumers (e.g., natural language processing modules such
    as part of speech tagger and named entity recognizer) that mainly work
    intermediate. We define data quality of unstructured data via (1) the
    similarity of the input data to the data expected by these consumers of
    unstructured data and via (2) the similarity of the input data to the data
    representing the real world. We deduce data quality dimensions from the
    elements in analytic pipelines for unstructured data and characterize them.
    Finally, we propose automatically measurable indicators for assessing the
    quality of unstructured text data and give hints towards an implementation.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-29&engl=1},
}
@inproceedings{INPROC-2016-28,
  author      = {C. Timurhan Sungur and Uwe Breitenb{\"u}cher and Oliver Kopp and Frank Leymann and Mozi Song and Andreas Wei{\ss} and Christoph Mayr-Dorn and Schahram Dustdar},
  title       = {{Identifying Relevant Resources and Relevant Capabilities of Collaborations - A Case Study}},
  booktitle   = {Proceedings of the 2016 IEEE 20th International Enterprise Distributed Object Computing Workshop (EDOCW)},
  publisher   = {IEEE Computer Society},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages       = {352--355},
  type        = {Demonstration},
  month       = {September},
  year        = {2016},
  keywords    = {Organizational performance; resource discovery; capability discovery; relevant resources; relevant capabilities; informal processes; unstructured processes},
  language    = {English},
  cr-category = {H.4.1 Office Automation,
    H.3.3 Information Search and Retrieval,
    H.3.4 Information Storage and Retrieval Systems and Software,
    H.5.3 Group and Organization Interfaces},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
    University of Stuttgart, Institute of Architecture of Application Systems},
  abstract    = {Organizational processes involving collaborating resources, such as development
    processes, innovation processes, and decision-making processes, typically
    affect the performance of many organizations. Moreover, including required but
    missing, resources and capabilities of collaborations can improve the
    performance of corresponding processes drastically. In this work, we
    demonstrate the extended Informal Process Execution (InProXec) method for
    identifying resources and capabilities of collaborations using a case study on
    the Apache jclouds project.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-28&engl=1},
}
@inproceedings{INPROC-2016-25,
  author      = {Pascal Hirmer and Matthias Wieland and Uwe Breitenb{\"u}cher and Bernhard Mitschang},
  title       = {{Dynamic Ontology-based Sensor Binding}},
  booktitle   = {Advances in Databases and Information Systems. 20th East European Conference, ADBIS 2016, Prague, Czech Republic, August 28-31, 2016, Proceedings},
  address     = {Prague, Czech Republic},
  publisher   = {Springer International Publishing},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  series      = {Information Systems and Applications, incl. Internet/Web, and HCI},
  volume      = {9809},
  pages       = {323--337},
  type        = {Conference Paper},
  month       = {August},
  year        = {2016},
  isbn        = {978-3-319-44038-5 (print), 978-3-319-44039-2 (online)},
  doi         = {10.1007/978-3-319-44039-2},
  keywords    = {Internet of Things; Sensors; Ontologies; Data Provisioning},
  language    = {English},
  cr-category = {E.0 Data General,
    B.8 Performance and Reliability},
  ee          = {http://www.springer.com/de/book/9783319440385},
  contact     = {pascal.hirmer@ipvs.uni-stuttgart.de},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
    University of Stuttgart, Institute of Architecture of Application Systems},
  abstract    = {In recent years, the Internet of Things gains more and more attention through
    cheap hardware devices and, consequently, an increased interconnection of them.
    These devices equipped with sensors and actuators form the foundation for so
    called smart environments that enable monitoring as well as self-organization.
    However, an efficient sensor registration, binding, and sensor data
    provisioning is still a major issue for the Internet of Things. Usually, these
    steps can take up to days or even weeks due to a manual configuration and
    binding by sensor experts that furthermore have to communicate with
    domain-experts that define the requirements, e.g. the types of sensors, for the
    smart environments. In previous work, we introduced a first vision of a method
    for automated sensor registration, binding, and sensor data provisioning. In
    this paper, we further detail and extend this vision, e.g., by introducing
    optimization steps to enhance efficiency as well as effectiveness. Furthermore,
    the approach is evaluated through a prototypical implementation.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-25&engl=1},
}
@inproceedings{INPROC-2016-24,
  author      = {Alexander Bergmayr and Uwe Breitenb{\"u}cher and Oliver Kopp and Manuel Wimmer and Gerti Kappel and Frank Leymann},
  title       = {{From Architecture Modeling to Application Provisioning for the Cloud by Combining UML and TOSCA}},
  booktitle   = {Proceedings of the 6th International Conference on Cloud Computing and Services Science (CLOSER 2016)},
  publisher   = {SCITEPRESS},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages       = {97--108},
  type        = {Conference Paper},
  month       = {April},
  year        = {2016},
  doi         = {10.5220/0005806900970108},
  isbn        = {978-989-758-182-3},
  keywords    = {TOSCA; UML; Model-Driven Software Engineering; Cloud Computing; Cloud Modeling},
  language    = {English},
  cr-category = {K.6 Management of Computing and Information Systems},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
    University of Stuttgart, Institute of Architecture of Application Systems},
  abstract    = {Recent efforts to standardize a deployment modeling language for cloud
    applications resulted in TOSCA. At the same time, the software modeling
    standard UML supports architecture modeling from different viewpoints.
    Combining these standards from cloud computing and software engineering would
    allow engineers to refine UML architectural models into TOSCA deployment models
    that enable automatic provisioning of cloud applications. However, this
    refinement task is currently carried out manually by recreating TOSCA models
    from UML models because a conceptual mapping between the two languages as basis
    for an automated translation is missing. In this paper, we exploit cloud
    modeling extensions to UML called CAML as the basis for our approach
    CAML2TOSCA, which aims at bridging UML and TOSCA. The validation of our
    approach shows that UML models can directly be injected into a TOSCA-based
    provisioning process. As current UML modeling tools lack cloud-based refinement
    support for deployment models, the added value of CAML2TOSCA is emphasized
    because it provides the glue between architecture modeling and application
    provisioning.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-24&engl=1},
}
@inproceedings{INPROC-2016-22,
  author      = {Pascal Hirmer and Matthias Wieland and Uwe Breitenb{\"u}cher and Bernhard Mitschang},
  title       = {{Automated Sensor Registration, Binding and Sensor Data Provisioning}},
  booktitle   = {Proceedings of the CAiSE'16 Forum, at the 28th International Conference on Advanced Information Systems Engineering (CAiSE 2016)},
  address     = {Ljubljana, Slovenia},
  publisher   = {CEUR-WS.org},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  series      = {CEUR Workshop Proceedings},
  volume      = {1612},
  pages       = {81--88},
  type        = {Conference Paper},
  month       = {June},
  year        = {2016},
  issn        = {1613-0073},
  keywords    = {Internet of Things; Sensors; Ontologies; Data Provisioning},
  language    = {English},
  cr-category = {J.6 Computer-Aided Engineering,
    H.3.1 Content Analysis and Indexing},
  ee          = {http://ceur-ws.org/Vol-1612/paper11.pdf},
  contact     = {pascal.hirmer@ipvs.uni-stuttgart.de},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Today, the Internet of Things has evolved due to an increasing interconnection
    of technical devices. However, the automated binding and management of things
    and sensors is still a major issue. In this paper, we present a method and
    system architecture for sensor registration, binding, and sensor data
    provisioning. This approach enables automated sensor integration and data
    processing by accessing the sensors and provisioning the data. Furthermore, the
    registration of new sensors is done in an automated way to avoid a complex,
    tedious manual registration. We enable (i) semantic description of sensors and
    things as well as their attributes using ontologies, (ii) the registration of
    sensors of a physical thing, (iii) a provisioning of sensor data using
    different data access paradigms, and (iv) dynamic sensor binding based on
    application requirements. We provide the Resource Management Platform as a
    prototypical implementation of the architecture and corresponding runtime
    measurements.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-22&engl=1},
}
@inproceedings{INPROC-2016-21,
  author      = {C. Timurhan Sungur and Uwe Breitenb{\"u}cher and Frank Leymann and Matthias Wieland},
  title       = {{Context-sensitive Adaptive Production Processes}},
  booktitle   = {Proceedings of the 48th CIRP Conference on Manufacturing Systems},
  publisher   = {Elsevier},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  series      = {Procedia CIRP},
  volume      = {41},
  pages       = {147--152},
  type        = {Conference Paper},
  month       = {February},
  year        = {2016},
  doi         = {10.1016/j.procir.2015.12.076},
  keywords    = {Process; Automation; Optimization; Adaptation},
  language    = {English},
  cr-category = {H.4.1 Office Automation,
    H.5.3 Group and Organization Interfaces},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
    University of Stuttgart, Institute of Architecture of Application Systems},
  abstract    = {To stay competitive, manufacturing companies need to adapt their processes on a
    regular basis to the most recent conditions in their corresponding domains.
    These adaptations are typically the result of turbulences, such as changes in
    human resources, new technological advancements, or economic crises. Therefore,
    to increase the efficiency of production processes, (i) automation, (ii)
    optimization, and (iii) dynamic adaptation became the most important
    requirements in this field. In this work, we propose a novel process modelling
    and execution approach for creating self-organizing processes: Production
    processes are extended by context-sensitive execution steps, for which
    sub-processes are selected, elected, optimized, and finally executed at
    runtime. During the election step, the most desired solution is chosen and
    optimized based on selection and optimization strategies of the respective
    processes. Moreover, we present a system architecture for modelling and
    executing these context-sensitive production processes.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-21&engl=1},
}
@inproceedings{INPROC-2016-10,
  author      = {Christoph Stach},
  title       = {{Secure Candy Castle - A Prototype for Privacy-Aware mHealth Apps}},
  booktitle   = {Proceedings of the 17th International Conference on Mobile Data Management},
  address     = {Porto},
  publisher   = {IEEE Computer Society Conference Publishing Services},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages       = {361--364},
  type        = {Demonstration},
  month       = {June},
  year        = {2016},
  keywords    = {mHealth; privacy; diagnostic game; diabetes},
  language    = {English},
  cr-category = {K.4.1 Computers and Society Public Policy Issues,
    D.4.6 Operating Systems Security and Protection,
    K.8 Personal Computing,
    J.3 Life and Medical Sciences},
  contact     = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Due to rising medical costs, the healthcare landscape is on the move. Novel
    treatment methods are badly required. Especially for the treatment of chronic
    diseases the usage of smart devices in combination with medical devices for
    telemedical screenings is a promising approach. If the patients are not in
    control of the collection and processing of their health data, privacy concerns
    limit their willingness to use such a method. In this paper, we present a
    prototype for an Android-based privacy-aware health game for children suffering
    from diabetes called Secure Candy Castle. In the game, the player keeps an
    electronic diabetes diary in a playful manner. In doing this, s/he is supported
    by various sensors. His or her data is analyzed and in case of a critical
    health condition, the game notifies authorized persons. With our approach, the
    user stays in control over his or her data, i.e., s/he defines which data
    should be shared with the game, how accurate this data should be, and even how
    the data is processed by the game. For this purpose, we apply the Privacy
    Management Platform, a fine-grained and extendable permission system.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-10&engl=1},
}
@inproceedings{INPROC-2016-09,
  author      = {Christoph Stach and Bernhard Mitschang},
  title       = {{The Secure Data Container: An Approach to Harmonize Data Sharing with Information Security}},
  booktitle   = {Proceedings of the 17th International Conference on Mobile Data Management},
  address     = {Porto},
  publisher   = {IEEE Computer Society Conference Publishing Services},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages       = {292--297},
  type        = {Conference Paper},
  month       = {June},
  year        = {2016},
  keywords    = {smart devices; information security; data sharing},
  language    = {English},
  cr-category = {K.4.1 Computers and Society Public Policy Issues,
    D.4.6 Operating Systems Security and Protection},
  contact     = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Smart devices became Marc Weiser's Computer of the 21st Century. Due to their
    versatility a lot of private data enriched by context data are stored on them.
    Even the health industry utilizes smart devices as portable health monitors and
    enablers for telediagnosis. So they represent a severe risk for information
    security. Yet the platform providers' countermeasures to these threats are by
    no means sufficient. In this paper we describe how information security can be
    improved. Therefore, we postulate requirements towards a secure handling of
    data. Based on this requirements specification, we introduce a secure data
    container as an extension for the Privacy Management Platform. Since a complete
    isolation of an app is usually not practicable, our approach also provides
    secure data sharing features. Finally, we evaluate our approach from a
    technical point of view as well as a security point of view and show its
    applicability in an eHealth scenario.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-09&engl=1},
}
@inproceedings{INPROC-2016-07,
  author      = {Christoph Gr{\"o}ger and Laura Kassner and Eva Hoos and Jan K{\"o}nigsberger and Cornelia Kiefer and Stefan Silcher and Bernhard Mitschang},
  title       = {{The Data-Driven Factory. Leveraging Big Industrial Data for Agile, Learning and Human-Centric Manufacturing}},
  booktitle   = {Proceedings of the 18th International Conference on Enterprise Information Systems},
  editor      = {Slimane Hammoudi and Leszek Maciaszek and Michele M. Missikoff and Olivier Camp and Jose Cordeiro},
  publisher   = {SciTePress},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages       = {40--52},
  type        = {Conference Paper},
  month       = {April},
  year        = {2016},
  isbn        = {978-989-758-187-8},
  keywords    = {IT Architecture, Data Analytics, Big Data, Smart Manufacturing, Industrie 4.0},
  language    = {English},
  cr-category = {H.4.0 Information Systems Applications General,
    J.2 Physical Sciences and Engineering},
  contact     = {Email an Christoph.Groeger@ipvs.uni-stuttgart.de oder laura.kassner@ipvs.uni-stuttgart.de},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Global competition in the manufacturing industry is characterized by ever
    shorter product life cycles, increasing complexity and a turbulent
    environment. High product quality, continuously improved processes as well as
    changeable organizational structures constitute central success factors for
    manufacturing companies. With the rise of the internet of things and Industrie
    4.0, the increasing use of cyber-physical systems as well as the digitalization
    of manufacturing operations lead to massive amounts of heterogeneous industrial
    data across the product life cycle. In order to leverage these big industrial
    data for competitive advantages, we present the concept of the data-driven
    factory. The data-driven factory enables agile, learning and human-centric
    manufacturing and makes use of a novel IT architecture, the Stuttgart IT
    Architecture for Manufacturing (SITAM), overcoming the insufficiencies of the
    traditional information pyramid of manufacturing. We introduce the SITAM
    architecture and discuss its conceptual components with respect to
    service-oriented integration, advanced analytics and mobile information
    provisioning in manufacturing. Moreover, for evaluation purposes, we present a
    prototypical implementation of the SITAM architecture as well as a real-world
    application scenario from the automotive industry to demonstrate the benefits
    of the data-driven factory.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-07&engl=1},
}
@inproceedings{INPROC-2016-06,
  author      = {Laura Kassner and Bernhard Mitschang},
  title       = {{Exploring Text Classification for Messy Data: An Industry Use Case for Domain-Specific Analytics}},
  booktitle   = {Advances in Database Technology - EDBT 2016, 19th International Conference on Extending Database Technology, Bordeaux, France, March 15-16, Proceedings},
  publisher   = {OpenProceedings.org},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages       = {491--502},
  type        = {Conference Paper},
  month       = {March},
  year        = {2016},
  isbn        = {978-3-89318-070-7},
  keywords    = {recommendation system; automotive; text analytics; domain-specific language; automatic classification},
  language    = {English},
  cr-category = {H.3.1 Content Analysis and Indexing,
    H.3.3 Information Search and Retrieval,
    H.4.2 Information Systems Applications Types of Systems,
    J.1 Administration Data Processing},
  ee          = {http://openproceedings.org/2016/conf/edbt/paper-52.pdf},
  contact     = {Email an laura.kassner@ipvs.uni-stuttgart.de},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Industrial enterprise data present classification problems which are different
    from those problems typically discussed in the scientific community -- with
    larger amounts of classes and with domain-specific, often unstructured data. We
    address one such problem through an analytics environment which makes use of
    domain-specific knowledge. Companies are beginning to use analytics on large
    amounts of text data which they have access to, but in day-to-day business,
    manual effort is still the dominant method for processing unstructured data. In
    the face of ever larger amounts of data, faster innovation cycles and higher
    product customization, human experts need to be supported in their work through
    data analytics. In cooperation with a large automotive manufacturer, we have
    developed a use case in the area of quality management for supporting human
    labor through text analytics: When processing damaged car parts for quality
    improvement and warranty handling, quality experts have to read text reports
    and assign error codes to damaged parts. We design and implement a system to
    recommend likely error codes based on the automatic recognition of error
    mentions in textual quality reports. In our prototypical implementation, we
    test several methods for filtering out accurate recommendations for error codes
    and develop further directions for applying this method to a competitive
    business intelligence use case.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-06&engl=1},
}
@inproceedings{INPROC-2015-62,
  author      = {Eva Maria Grochowski and Eva Hoos and Stefan Waitzinger and Dieter Spath and Bernhard Mitschang},
  title       = {{Web-based collaboration system for interdisciplinary and interorganizational development teams: case study}},
  booktitle   = {Proceeding of the 23rd International Conference on Production Research},
  publisher   = {-},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages       = {1--11},
  type        = {Conference Paper},
  month       = {August},
  year        = {2015},
  keywords    = {Collaboration; Web-based Platform},
  language    = {English},
  cr-category = {J.1 Administration Data Processing},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {The automotive industry faces three major challenges -- shortage of fossil
    fuels, politics of global warming and rising competition. In order to remain
    competitive companies have to develop more efficient and alternative fuel
    vehicles. Out of these challenges new cooperation models become inevitable. The
    development of complex products like automobiles claims skills of various
    disciplines e.g. engineering, IT. Furthermore, these skills are spread all over
    various companies within the supply chain and beyond. Hence, supporting IT
    systems for collaborative, innovative work is absolutely essential.
    Interdisciplinary and interorganizational development has new demands on
    information systems. These demands are not well analyzed at the moment and
    therefore, existing collaboration platforms cannot address them. In order to
    determine these new requirements and show the gap to existing collaboration
    platform we performed a case study. In this case study, we analyze the research
    campus ``Active Research Environment for the Next Generation of Automobiles''
    (ARENA2036). It is a new cooperation form, where diverse partners from the
    industry, research institutes and universities elaborate collaboratively future
    topics in the field of production and light weight construction under ``one
    single roof''. We focus on the special needs of the interdisciplinary,
    interorganizational partners. The requirements were polled by a questionnaire.
    About 80 percent of the active research workers in ARENA2036 answered the
    questionnaire. By the answers we can identify the special needs and also role
    profiles of the collaborators. The resulting role profiles specify the personal
    requirements. These are used for an evaluation of existing information
    platforms. The deficits between the offered features and the demands of the
    partners, as well as new technologies supporting the individual needs of users
    are the foundation for the information system concept for ARENA2036. In our
    findings we present a role-based view on requirements for the development of an
    information system for collaboration and cooperation. Based on these
    requirements we then develop a concept for mobile apps with focus on a
    role-based design.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-62&engl=1},
}
@inproceedings{INPROC-2015-60,
  author      = {Pascal Hirmer},
  title       = {{Flexible Modeling and Execution of Data Integration Flows}},
  booktitle   = {Proceedings of the 9th Advanced Summer School on Service Oriented Computing},
  editor      = {Johanna Barzen and Rania Khalaf and Frank Leymann and Bernhard Mitschang},
  publisher   = {IBM Research Report},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages       = {153--154},
  type        = {Conference Paper},
  month       = {December},
  year        = {2015},
  keywords    = {Big Data; Data Integration; Data Flows; Pipes and Filters},
  language    = {English},
  cr-category = {E.0 Data General,
    E.1 Data Structures,
    H.1 Models and Principles},
  ee          = {http://domino.research.ibm.com/library/cyberdig.nsf/papers/656B934403848E8A85257F1D00695A63/$File/rc25564.pdf},
  contact     = {Pascal.Hirmer@ipvs.uni-stuttgart.de},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {For my PhD, I am working towards an approach for data flow-based, ad-hoc data
    integration based on cloud computing technologies. I started my work roughly
    one year ago. In the following, I will present the research problem and
    introduce a plan how to cope with the introduced issues.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-60&engl=1},
}
@inproceedings{INPROC-2015-57,
  author      = {Uwe Breitenb{\"u}cher and Pascal Hirmer and K{\'a}lm{\'a}n K{\'e}pes and Oliver Kopp and Frank Leymann and Matthias Wieland},
  title       = {{A Situation-Aware Workflow Modelling Extension}},
  booktitle   = {Proceedings of the 17th International Conference on Information Integration and Web-based Applications \& Services (iiWAS 2015)},
  publisher   = {ACM},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages       = {478--484},
  type        = {Conference Paper},
  month       = {December},
  year        = {2015},
  keywords    = {Situation-Aware Workflows; Workflow Modelling; Workflow Management; Situation-Awareness; Workflow Execution},
  language    = {English},
  cr-category = {D.3.3 Programming Language Constructs and Features,
    H.4.1 Office Automation},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
    University of Stuttgart, Institute of Architecture of Application Systems},
  abstract    = {The automation of business processes is of vital importance for organizations
    to speed up their business and to lower costs. Due to emerging technologies in
    the field of Internet of Things, changing situations can be recognized
    automatically, which provides the basis for an automated adaptation of process
    executions in order to react to changing circumstances. Although approaches
    exist that enable creating self-adapting workflows, a systematic modelling
    approach that supports the specification of situational dependencies directly
    in workflow models is missing. In this paper, we tackle this issue by
    presenting a modelling extension called SitME that defines (i) an extensible
    Situation Event type, (ii) the concept of Situational Scopes, and (iii) a
    visual notation. As the introduced extension is language-independent, we apply
    the approach to BPEL to validate its practical feasibility.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-57&engl=1},
}
@inproceedings{INPROC-2015-55,
  author      = {Uwe Breitenb{\"u}cher and Tobias Binz and Oliver Kopp and Frank Leymann and Johannes Wettinger},
  title       = {{A Modelling Concept to Integrate Declarative and Imperative Cloud Application Provisioning Technologies}},
  booktitle   = {Proceedings of the 5th International Conference on Cloud Computing and Services Science (CLOSER 2015)},
  publisher   = {SciTePress},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages       = {487--496},
  type        = {Conference Paper},
  month       = {May},
  year        = {2015},
  keywords    = {Cloud Application Provisioning; Automation; Declarative Modelling; Imperative Modelling},
  language    = {English},
  cr-category = {K.6 Management of Computing and Information Systems},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
    University of Stuttgart, Institute of Architecture of Application Systems},
  abstract    = {Efficient application provisioning is one of the most important issues in Cloud
    Computing today. For that purpose, various provisioning automation technologies
    have been developed that can be generally categorized into two different
    flavors: (i) declarative approaches are based on describing the desired goals
    whereas (ii) imperative approaches are used to describe explicit sequences of
    low-level tasks. Since modern Cloud-based business applications become more and
    more complex, employ a plethora of heterogeneous components and services that
    must be wired, and require complex configurations, the two kinds of
    technologies have to be integrated to model the provisioning of such
    applications. In this paper, we present a process modelling concept that
    enables the seamless integration of imperative and declarative provisioning
    models and their technologies while preserving the strengths of both flavors.
    We validate the technical feasibility of the approach by applying the concept
    to the workflow language BPEL and evaluate its features by several criteria.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-55&engl=1},
}
@inproceedings {INPROC-2015-52,
author = {Ulrike Pado and Cornelia Kiefer},
title = {{Short Answer Grading: When Sorting Helps and When it Doesn't}},
booktitle = {Proceedings of the 4th workshop on NLP for Computer Assisted Language Learning, NODALIDA 2015},
editor = {{Link{\"o}pings universitet Link{\"o}ping University Electronic Press}},
address = {Wilna},
publisher = {LiU Electronic Press and ACL Anthology},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Link{\"o}ping Electronic Conference Proceedings},
pages = {42--50},
type = {Workshop Paper},
month = {May},
year = {2015},
isbn = {978-91-7519-036-5},
keywords = {short-answer grading; assisted grading; short-answer corpora},
language = {English},
cr-category = {J Computer Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Automatic short-answer grading promises improved student feedback at reduced
teacher effort both during and after instruction. Automated grading is,
however, controversial in high-stakes testing and complex systems can be difficult
to set up by non-experts, especially for frequently changing questions. We
propose a versatile, domain-independent system that assists manual grading by
pre-sorting answers according to their similarity to a reference answer. We
show near state-of-the-art performance on the task of automatically grading
the answers from CREG (Meurers et al., 2011). To evaluate the grader assistance
task, we present CSSAG (Computer Science Short Answers in German), a new
corpus of German computer science questions answered by natives and
highly-proficient non-natives. On this corpus, we demonstrate the positive
influence of answer sorting on the slowest-graded, most complex-to-assess
questions.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-52&engl=1}
}
@inproceedings {INPROC-2015-47,
author = {Oliver Kopp and Michael Falkenthal and Niklas Hartmann and Frank Leymann and Holger Schwarz and Jessica Thomsen},
title = {{Towards a Cloud-based Platform Architecture for a Decentralized Market Agent}},
booktitle = {INFORMATIK 2015},
editor = {Douglas Cunningham and Petra Hofstedt and Klaus Meer and Ingo Schmitt},
publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Informatics (LNI)},
volume = {P-246},
pages = {69--80},
type = {Workshop Paper},
month = {September},
year = {2015},
isbn = {978-3-88579-640-4},
issn = {1617-5468},
language = {English},
cr-category = {J.m Computer Applications Miscellaneous},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {Reorganization of power generation, thereby replacing conventional energy
sources by innovative renewable energy sources, demands a change in
distribution grid structure and operation. The foreseen Decentralized Marked
Agent is a new role in the energy market sector accomplishing not only trading
on energy and operating reserve markets but also regulating flexibilities at
the distribution grid level, such as energy storage and decentralized energy
generators, and thereby considering system services and securing system
stability. This paper presents requirements on an IT system to support this new
role. We design an architecture matching these requirements and show how Cloud
computing technology can be used to implement the architecture. This enables
data concerning the distribution grid being automatically gathered and
processed by dedicated algorithms, aiming to optimize cost efficient operation
and the development of the distribution grid.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-47&engl=1}
}
@inproceedings {INPROC-2015-46,
author = {Jessica Thomsen and Niklas Hartmann and Florian Klumpp and Thomas Erge and Michael Falkenthal and Oliver Kopp and Frank Leymann and Sven Stando and Nino Turek and Christoph Schlenzig and Holger Schwarz},
title = {{Darstellung des Konzeptes -- DMA Decentralised Market Agent -- zur Bew{\"a}ltigung zuk{\"u}nftiger Herausforderungen in Verteilnetzen}},
booktitle = {INFORMATIK 2015},
editor = {Douglas Cunningham and Petra Hofstedt and Klaus Meer and Ingo Schmitt},
publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Informatics (LNI)},
volume = {P-246},
pages = {53--67},
type = {Workshop Paper},
month = {September},
year = {2015},
isbn = {978-3-88579-640-4},
issn = {1617-5468},
language = {German},
cr-category = {J.m Computer Applications Miscellaneous},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {In der vorliegenden Ver{\"o}ffentlichung wird ein Konzept f{\"u}r einen neuen
Marktakteur im Strommarkt vorgestellt, der im zuk{\"u}nftigen Smart Grid als
Aggregator und Planer fungieren kann. Dieser Decentralised Market Agent -- DMA --
soll die Informationen aller vorhandenen Erzeugungs- und Speicheranlagen,
Lasten und Netzinformationen auf Verteilnetzebene aggregieren sowie mit lokalen
Akteuren und an den zentralen M{\"a}rkten agieren um einen kostenoptimalen Betrieb
und Ausbau des Systems Verteilnetzes zu realisieren. Zur Handlungsf{\"a}higkeit
dieser neuen Marktrolle bedarf es hochaufl{\"o}sender Messungen im Verteilnetz und
einer {\glqq}real-time{\grqq} Aufbereitung der Messdaten. Im vorliegenden Paper sollen das
Konzept sowie die notwendigen Bausteine zur Erreichung der Handlungsf{\"a}higkeit
des DMA vorgestellt sowie die zuk{\"u}nftig geplanten Untersuchungen erl{\"a}utert
werden. Die detaillierte Entwicklung des Konzepts sowie weiterf{\"u}hrende Analysen
sind Teil des Projektes NEMAR -- Netzbewirtschaftung als neue Marktrolle,
gef{\"o}rdert durch BMWi im Rahmen der Forschungsinitiative Zukunftsf{\"a}hige
Stromnetze.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-46&engl=1}
}
@inproceedings {INPROC-2015-45,
author = {Laura Kassner and Cornelia Kiefer},
title = {{Taxonomy Transfer: Adapting a Knowledge Representing Resource to new Domains and Tasks}},
booktitle = {Proceedings of the 16th European Conference on Knowledge Management},
publisher = {acpi Online},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {399--407},
type = {Conference Paper},
month = {September},
year = {2015},
keywords = {taxonomy; ontology; ontology population; semantic resources; domain-specific language},
language = {English},
cr-category = {I.2.7 Natural Language Processing,
I.2.4 Knowledge Representation Formalisms and Methods,
J.7 Computers in Other Systems},
contact = {Email an laura.kassner@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Today, data from different sources and different phases of the product life
cycle are usually analyzed in isolation and with considerable time delay.
Real-time integrated analytics is especially beneficial in a production
context. We present an architecture for data- and analytics-driven exception
escalation in manufacturing and show the advantages of integrating unstructured
data.},
internal-note = {NOTE(review): this abstract is identical to the one of INPROC-2015-15 (MaXCept) and does not match the taxonomy-transfer topic of the title -- verify against the published paper},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-45&engl=1}
}
@inproceedings {INPROC-2015-34,
author = {Pascal Hirmer and Matthias Wieland and Holger Schwarz and Bernhard Mitschang and Uwe Breitenb{\"u}cher and Frank Leymann},
title = {{SitRS - A Situation Recognition Service based on Modeling and Executing Situation Templates}},
booktitle = {Proceedings of the 9th Symposium and Summer School On Service-Oriented Computing},
editor = {Johanna Barzen and Rania Khalaf and Frank Leymann and Bernhard Mitschang},
publisher = {IBM Research Report},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Technical Paper},
volume = {RC25564},
pages = {113--127},
type = {Conference Paper},
month = {December},
year = {2015},
keywords = {Situation Recognition; IoT; Context; Integration; Cloud Computing; OSLC},
language = {English},
cr-category = {J.6 Computer-Aided Engineering,
H.3.1 Content Analysis and Indexing},
ee = {http://domino.research.ibm.com/library/cyberdig.nsf/papers/656B934403848E8A85257F1D00695A63},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Today, the Internet of Things has evolved due to an advanced connectivity of
physical objects. Furthermore, Cloud Computing gains more and more interest for
the provisioning of services. In this paper, we want to further improve the
integration of these two areas by providing a cloud-based situation recognition
service -- SitRS. This service can be used to integrate real world objects -- the
things -- into the internet by deriving their situational state based on
sensors. This enables context-aware applications to detect events in a smart
environment. SitRS is a basic service enabling a generic and easy
implementation of Smart* applications such as SmartFactorys, SmartCities,
SmartHomes. This paper introduces an approach containing a method and a system
architecture for the realization of such a service. The core steps of the
method are: (i) registration of the sensors, (ii) modeling of the situation,
and (iii) execution of the situation recognition. Furthermore, a prototypical
implementation of SitRS is presented and evaluated via runtime measurements.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-34&engl=1}
}
@inproceedings {INPROC-2015-33,
author = {Pascal Hirmer and Peter Reimann and Matthias Wieland and Bernhard Mitschang},
title = {{Extended Techniques for Flexible Modeling and Execution of Data Mashups}},
booktitle = {Proceedings of the 4th International Conference on Data Management Technologies and Applications (DATA)},
editor = {Markus Helfert and Andreas Holzinger and Orlando Belo and Chiara Francalanci},
address = {Colmar},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {111--122},
type = {Conference Paper},
month = {July},
year = {2015},
isbn = {978-989-758-103-8},
keywords = {Data Mashups; Ad-hoc Integration; Patterns; Data Flow},
language = {English},
cr-category = {E.1 Data Structures,
E.5 Data Files},
contact = {pascal.hirmer@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Today, a multitude of highly-connected applications and information systems
hold, consume and produce huge amounts of heterogeneous data. The overall
amount of data is even expected to dramatically increase in the future. In
order to conduct, e.g., data analysis, visualizations or other value-adding
scenarios, it is necessary to integrate specific, relevant parts of data into a
common source. Due to oftentimes changing environments and dynamic requests,
this integration has to support ad-hoc and flexible data processing
capabilities. Furthermore, an iterative and explorative trial-and-error
integration based on different data sources has to be possible. To cope with
these requirements, several data mashup platforms have been developed in the
past. However, existing solutions are mostly non-extensible, monolithic systems
or applications with many limitations regarding the mentioned requirements. In
this paper, we introduce an approach that copes with these issues (i) by the
introduction of patterns to enable decoupling from implementation details, (ii)
by a cloud-ready approach to enable availability and scalability, and (iii) by
a high degree of flexibility and extensibility that enables the integration of
heterogeneous data as well as dynamic (un-)tethering of data sources. We
evaluate our approach using runtime measurements of our prototypical
implementation.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-33&engl=1}
}
@inproceedings {INPROC-2015-25,
author = {Frank Steimle and Matthias Wieland and Bernhard Mitschang and Sebastian Wagner and Frank Leymann},
title = {{Design and Implementation Issues of a Secure Cloud-Based Health Data Management System}},
booktitle = {Proceedings of the 9th Symposium and Summer School On Service-Oriented Computing},
editor = {Johanna Barzen and Rania Khalaf and Frank Leymann and Bernhard Mitschang},
publisher = {IBM Research Report},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Technical Paper},
volume = {RC25564},
pages = {68--82},
type = {Conference Paper},
month = {December},
year = {2015},
keywords = {eHealth; mHealth; cloud data; data analysis; security},
language = {English},
cr-category = {C.2.4 Distributed Systems,
H.2.8 Database Applications,
J.3 Life and Medical Sciences},
ee = {http://domino.research.ibm.com/library/cyberdig.nsf/papers/656B934403848E8A85257F1D00695A63},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {eHealth gains more and more interest since a lot of end-user devices which
support health data capturing are available. The captured data has to be
managed and securely stored, in order to access it from different devices and
to share it with other users such as physicians. The aim of the German-Greek
research project ECHO is to support the treatment of patients, who suffer from
Chronic Obstructive Pulmonary Disease (COPD), a chronic respiratory disease.
Usually the patients need to be examined by their physicians on a regular basis
due to their chronic condition. Since this is very time consuming and
expensive, we develop an eHealth system which allows the physician to monitor
patients' conditions remotely, e.g., via smart phones. Therefore, a secure data
processing and sharing eHealth platform is required. In this paper we introduce
a health data model and a corresponding platform-architecture for the
management and analysis of the data provided by the patients. Furthermore, we
show how the security of the data is ensured and we explain how the platform
can be hosted in a cloud-based environment using the OASIS standard TOSCA,
which enables a self-contained and portable description and management of
cloud-services.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-25&engl=1}
}
@inproceedings {INPROC-2015-24,
author = {Matthias Wieland and Holger Schwarz and Uwe Breitenb{\"u}cher and Frank Leymann},
title = {{Towards Situation-Aware Adaptive Workflows}},
booktitle = {Proceedings of the 13th Annual IEEE Intl. Conference on Pervasive Computing and Communications Workshops: 11th Workshop on Context and Activity Modeling and Recognition},
address = {St. Louis, Missouri, USA},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {32--37},
type = {Workshop Paper},
month = {March},
year = {2015},
keywords = {situation-awareness; adaptive-workflows; situation recognition; situation-aware workflow system},
language = {English},
cr-category = {H.4.1 Office Automation},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {Workflows are an established IT concept to achieve business goals in a reliable
and robust manner. However, the dynamic nature of modern information systems,
the upcoming Industry 4.0, and the Internet of Things increase the complexity
of modeling robust workflows significantly as various kinds of situations, such
as the failure of a production system, have to be considered explicitly.
Consequently, modeling workflows in a situation-aware manner is a complex
challenge that quickly results in big unmanageable workflow models. To overcome
these issues, we present an approach that allows workflows to become
situation-aware to automatically adapt their behavior according to the
situation they are in. The approach is based on aggregated context information,
which has been an important research topic in the last decade to capture
information about an environment. We introduce a system that derives high-level
situations from lower-level context and sensor information. A situation can be
used by different situation-aware workflows to adapt to the current situation
in their execution environment. SitOPT enables the detection of situations
using different situation-recognition systems, exchange of information about
detected situations, optimization of the situation recognition, and runtime
adaptation and optimization of situation-aware workflows based on the recognized
situations.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-24&engl=1}
}
@inproceedings {INPROC-2015-21,
author = {Oliver Kopp and Tobias Binz and Uwe Breitenb{\"u}cher and Frank Leymann and Thomas Michelbach},
title = {{A Domain-Specific Modeling Tool to Model Management Plans for Composite Applications}},
booktitle = {Proceedings of the 7th Central European Workshop on Services and their Composition, ZEUS 2015},
editor = {Thomas S. Heinze and Thomas M. Prinz},
publisher = {CEUR Workshop Proceedings},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {CEUR Workshop Proceedings},
volume = {1360},
pages = {51--54},
type = {Demonstration},
month = {May},
year = {2015},
issn = {1613-0073},
keywords = {TOSCA; BPMN Extension},
language = {English},
cr-category = {H.4.1 Office Automation},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2015-21/INPROC-2015-21.pdf,
http://ceur-ws.org/Vol-1360/,
http://www.zeus-workshop.eu/2015/},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {TOSCA is a standard to describe composite Cloud-applications and their
management in a portable fashion. Thereby, BPMN4TOSCA is a proposed extension
for BPMN to ease modeling of management plans. This demonstration presents a
web-based modeling tool that supports an updated version of BPMN4TOSCA. The
updated version supports direct wiring of data of tasks and events without the
need of separate data objects.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-21&engl=1}
}
@inproceedings {INPROC-2015-20,
author = {Sebastian Wagner and Oliver Kopp and Frank Leymann},
title = {{Choreography-based Consolidation of Interacting Processes Having Activity-based Loops}},
booktitle = {Proceedings of the 5th International Conference on Cloud Computing and Services Science (CLOSER 2015)},
address = {Stuttgart},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {284--296},
type = {Conference Paper},
month = {May},
year = {2015},
keywords = {BPEL; Choreography; Process Consolidation; Loops},
language = {English},
cr-category = {H.4.1 Office Automation},
contact = {sebastian.wagner@iaas.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {Choreographies describe the interaction between two or more parties. The
interaction behavior description might contain loops. In case two parties want
to merge their behavior to gain competitive advantage, the contained loop
constructs also have to be merged. This paper presents a language-independent
discussion on loop-structure pairing in choreographies and possible merging
strategies. Thereby, the focus is turned on loops grouping child activities to
be iterated. We show the feasibility of the merging strategies by applying them
to BPEL-based choreographies.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-20&engl=1}
}
@inproceedings {INPROC-2015-15,
author = {Laura Kassner and Bernhard Mitschang},
title = {{MaXCept -- Decision Support in Exception Handling through Unstructured Data Integration in the Production Context. An Integral Part of the Smart Factory.}},
booktitle = {Proceedings of the 48th Hawaii International Conference on System Sciences: HICSS 48, 2015},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1007--1016},
type = {Conference Paper},
month = {January},
year = {2015},
keywords = {smart manufacturing; industrial internet; unstructured data; data integration; exception escalation; expert search},
language = {English},
cr-category = {H.4.0 Information Systems Applications General,
J.1 Administration Data Processing,
J.7 Computers in Other Systems},
contact = {laura.kassner@gsame.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Today, data from different sources and different phases of the product life
cycle are usually analyzed in isolation and with considerable time delay.
Real-time integrated analytics is especially beneficial in a production
context. We present an architecture for data- and analytics-driven exception
escalation in manufacturing and show the advantages of integrating unstructured
data.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-15&engl=1}
}
@inproceedings {INPROC-2015-11,
author = {Christoph Stach},
title = {{How to Deal with Third Party Apps in a Privacy System - The PMP Gatekeeper}},
booktitle = {Proceedings of the 16th International Conference on Mobile Data Management},
address = {Pittsburgh},
publisher = {IEEE Computer Society Conference Publishing Services},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--6},
type = {Conference Paper},
month = {June},
year = {2015},
keywords = {Android; Privacy Systems; Legacy Apps},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
D.4.6 Operating Systems Security and Protection},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Nowadays, mobile devices collect a lot of private information. Therefore every
vendor of a mobile platform has to provide a sufficient mechanism to secure
this data. Android pursues a strategy to pass full control (and thus full
responsibility) over any private data to the user. However, the Android
Permission System is not sufficient for that purpose. Various third party
approaches try to enhance the Android privacy policy model. Nevertheless, these
approaches have to solve the problem of how to deal with Legacy Apps, i.e.,
apps that do not collaborate with an enhanced privacy policy model.
In this paper, we analyze various alternative privacy systems and discuss
different approaches of how to deal with Legacy Apps. Based on our findings, we
introduce the so-called PMP Gatekeeper, a best of breed approach dealing with
Legacy Apps for the Privacy Management Platform (PMP). The PMP Gatekeeper
classifies apps and deals with each class appropriately. So the user can adjust
privacy settings for every kind of app. With our prototype we show that the
PMP in combination with the PMP Gatekeeper becomes a holistic privacy system.
Although our prototype is for Android, our realization approach can be applied
to other application platforms in order to offer a satisfying privacy system.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-11&engl=1}
}
@inproceedings {INPROC-2014-76,
author = {Peter Reimann and Holger Schwarz and Bernhard Mitschang},
title = {{A Pattern Approach to Conquer the Data Complexity in Simulation Workflow Design}},
booktitle = {Proceedings of OnTheMove Federated Conferences and Workshops (OTM), 22nd International Conference on Cooperative Information Systems (CoopIS 2014)},
editor = {Meersman, R. and others},
address = {Amantea, Italy},
publisher = {Springer Berlin Heidelberg},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {LNCS},
volume = {8841},
pages = {21--38},
type = {Conference Paper},
month = {October},
year = {2014},
keywords = {Data Provisioning; Data Management Patterns; SIMPL; Simulation Workflow; Simulation Workflow Design; Workflow; Workflow Design},
language = {English},
cr-category = {H.2.5 Heterogeneous Databases,
H.2.8 Database Applications,
H.4.1 Office Automation},
contact = {Peter Reimann Peter.Reimann@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Scientific workflows may be used to enable the collaborative implementation of
scientific applications across various domains. Since each domain has its own
requirements and solutions for data handling, such workflows often have to deal
with a highly heterogeneous data environment. This results in an increased
complexity of workflow design. As scientists typically design their scientific
workflows on their own, this complexity hinders them to concentrate on their
core issue, namely the experiments, analyses, or simulations they conduct. In
this paper, we present a novel approach to a pattern-based abstraction support
for the complex data management in simulation workflows that goes beyond
related work in similar research areas. A pattern hierarchy with different
abstraction levels enables a separation of concerns according to the skills of
different persons involved in workflow design. The goal is that scientists are
no longer obliged to specify low-level details of data management in their
workflows. We discuss the advantages of this approach and show to what extent
it reduces the complexity of simulation workflow design. Furthermore, we
illustrate how to map patterns onto executable workflows. Based on a
prototypical implementation of three real-world simulations, we evaluate our
approach according to relevant requirements.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-76&engl=1}
}
@inproceedings {INPROC-2014-75,
author = {Jan K{\"o}nigsberger and Stefan Silcher and Bernhard Mitschang},
title = {{SOA-GovMM: A Meta Model for a Comprehensive SOA Governance Repository}},
booktitle = {Proceedings of the 2014 IEEE 15th International Conference on Information Reuse and Integration},
editor = {James Joshi and Elisa Bertino and Bhavani Thuraisingham and Ling Liu},
address = {Piscataway, NJ, USA},
publisher = {IEEE Systems, Man, and Cybernetics Society (SMC)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {187--194},
type = {Conference Paper},
month = {August},
year = {2014},
isbn = {978-1-4799-5880-1},
keywords = {Service-Oriented Architecture; SOA Governance; Meta Model; Governance Repository},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
H.3.3 Information Search and Retrieval},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2014-75/INPROC-2014-75.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In recent years, the paradigm of service-oriented architecture (SOA) has more
and more found its way into many organizations. The SOA principles of loosely
coupled and reusable services have convinced decision makers in many
organizations to start SOA initiatives. Yet, the lack of proper governance
mechanisms has doomed many projects to fail. Although some SOA governance
frameworks exist, they differ highly in scope and none of them covers the whole
spectrum necessary to properly govern a SOA. In this paper we identify and
discuss eleven core areas the governance of a SOA has to cover in order to
realize the intended benefit in flexibility and agility. We then analyze and
evaluate existing SOA governance frameworks with regard to those requirements.
Subsequently, we present a meta model composed of four parts: Service Provider,
Service Consumer, Organizational Structure and Business Object. We show that
those four parts cover all requirements for a comprehensive SOA governance
repository. This allows an organization to leverage the information integrated
in the repository to better govern their SOA and therefore improve the chances
of its success.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-75&engl=1}
}
@inproceedings {INPROC-2014-66,
author = {Pascal Hirmer and Uwe Breitenb{\"u}cher and Tobias Binz and Frank Leymann},
title = {{Automatic Topology Completion of TOSCA-based Cloud Applications}},
booktitle = {Proceedings des CloudCycle14 Workshops auf der 44. Jahrestagung der Gesellschaft f{\"u}r Informatik e.V. (GI)},
address = {Bonn},
publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {LNI},
volume = {232},
pages = {247--258},
type = {Workshop Paper},
month = {September},
year = {2014},
isbn = {978-3-88579-626-8},
keywords = {TOSCA; Automatic Topology Completion; Provisioning; Cloud Computing; Topology Modeling},
language = {English},
cr-category = {K.6 Management of Computing and Information Systems},
ee = {http://subs.emis.de/LNI/Proceedings/Proceedings232/article82.html},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {Automation of application provisioning is one of the key aspects of Cloud
Computing. Standards such as the Topology and Orchestration Specification for
Cloud Applications (TOSCA) provide a means to model application topologies
which can be provisioned fully automatically. Many automated provisioning
engines require that these topologies are complete in the sense of specifying
all application, platform, and infrastructure components. However, modeling
complete models is a complex, time-consuming, and error-prone task that
typically requires a lot of technical expertise. In this paper, we present an
approach that enables users to model incomplete TOSCA application topologies
that are completed automatically to deployable, complete models. This enables
users to focus on the business-relevant application components and simplifies
the creation process tremendously by minimizing the required effort and
know-how. We prove the technical feasibility of the presented approach by a
prototypical implementation based on the open source modeling tool Winery. In
addition, we evaluate the approach by standards-compliance and performance.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-66&engl=1}
}
@inproceedings {INPROC-2014-65,
author = {Eva Hoos},
title = {{Design method for developing a Mobile Engineering-Application Middleware (MEAM)}},
booktitle = {Proceedings of the 2014 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops), 24-28 March, 2014, Budapest, Hungary},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {176--177},
type = {Workshop Paper},
month = {March},
year = {2014},
doi = {10.1109/PerComW.2014.6815193},
language = {English},
cr-category = {J.2 Physical Sciences and Engineering,
J.4 Social and Behavioral Sciences},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Mobile Apps running on smartphones and tablets offer a new possibility to
enhance the work of engineers because they provide an easy-to-use,
touchscreen-based handling and can be used anytime and anywhere. Introducing
mobile apps in the engineering domain is difficult because the IT environment
is heterogeneous and engineering-specific challenges in the app development
arise e. g., large amount of data and high security requirements. There is a
need for an engineering-specific middleware to facilitate and standardize the
app development. However, such a middleware does not yet exist as well as a
holistic set of requirements for the development. Therefore, we propose a
design method which offers a systematic procedure to develop Mobile
Engineering-Application Middleware.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-65&engl=1}
}
@inproceedings {INPROC-2014-64,
author = {Eva Hoos and Christoph Gr{\"o}ger and Bernhard Mitschang},
title = {{Mobile Apps in Engineering: A Process-Driven Analysis of Business Potentials and Technical Challenges}},
booktitle = {Proceedings of the 9th CIRP Conference on Intelligent Computation in Manufacturing Engineering (CIRP ICME), 23-25 July, 2014, Capri (Naples), Italy},
publisher = {CIRP},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {July},
year = {2014},
language = {English},
cr-category = {H.4.0 Information Systems Applications General,
J.4 Social and Behavioral Sciences,
J.2 Physical Sciences and Engineering},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Mobile apps on smartphones and tablet PCs are more and more employed in
enterprises to optimize business processes, e.g. by elimination of paper-based
data collection. With respect to engineering, mobile apps provide a huge
potential for increased flexibility and efficiency due to their anywhere and
anytime characteristics, e.g., for product testing in the field. However, not
every usage of mobile apps is beneficial from a business point of view and
existing apps for engineering represent only rudimentary front-ends for
stationary IT systems without an app-oriented redesign. Hence, there are three
core challenges to leverage the potential of mobile apps in engineering: (1)
identifying value-added app usage scenarios from a process point of view, (2)
realizing a task-oriented and context-aware user interface design and (3)
mastering technical obstacles at the app implementation. In this paper, we
address these challenges by a case-oriented analysis of selected engineering
processes in the automotive industry in order to identify engineering tasks
suited for the usage of mobile apps. On this basis, we design corresponding
engineering apps and analyze their business potentials. Moreover, we derive
common technological challenges for the development of engineering apps, e.g.
data synchronization aspects, and highlight further research issues.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-64&engl=1}
}
@inproceedings {INPROC-2014-63,
author = {Alexander Blehm and Volha Kalach and Alexander Kicherer and Gustav Murawski and Tim Waizenegger and Matthias Wieland},
title = {{Policy-Framework -- Eine Methode zur Umsetzung von Sicherheits-Policies im Cloud-Computing}},
booktitle = {Proceedings des CloudCycle14 Workshops auf der 44. Jahrestagung der Gesellschaft f{\"u}r Informatik e.V. (GI)},
publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {LNI},
type = {Workshop Paper},
month = {September},
year = {2014},
language = {German},
cr-category = {K.6 Management of Computing and Information Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {Cloud-Computing gewinnt immer mehr an Bedeutung bei der kosteneffizienten und
skalierbaren Bereitstellung von IT-Diensten. Damit sich Cloud-Computing jedoch
durchsetzen kann, muss die Sicherheit und Compliance der Dienste garantiert
werden, d. h. die Einhaltung von Gesetzen, Richtlinien und Datenschutzvorgaben.
Um diese Ziele zu erreichen, wird in diesem Beitrag ein Policy-Framework
vorgestellt, welches die Umsetzung von Sicherheits-Policies im Cloud-Computing
erm{\"o}glicht. Eine Policy beschreibt dabei nicht-funktionale Anforderungen an
Cloud-Dienste. Des Weiteren werden verschiedene prototypisch umgesetzte
Policies beschrieben und es wird ein Moodle-System als Anwendungsbeispiel f{\"u}r
einen Cloud-Dienst mit den vorhandenen Policies annotiert. Dadurch erfolgt eine
Evaluation des Policy-Frameworks.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-63&engl=1}
}
@inproceedings {INPROC-2014-61,
author = {Marina Bitsaki and Christos Koutras and Georgios Koutras and Frank Leymann and Bernhard Mitschang and Christos Nikolaou and Nikos Siafakas and Steve Strauch and Nikos Tzanakis and Matthias Wieland},
title = {{An Integrated mHealth Solution for Enhancing Patients' Health Online}},
booktitle = {Proceedings of the 6th European Conference of the International Federation for Medical and Biological Engineering (MBEC'14)},
publisher = {International Federation for Medical and Biological Engineering (IFMBE)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--4},
type = {Conference Paper},
month = {September},
year = {2014},
keywords = {Patient monitoring; COPD; ICT application services; Cloud technology; Online services; Mobile applications; Intelligent data mining},
language = {English},
cr-category = {C.2.4 Distributed Systems,
H.2.8 Database Applications,
H.4.1 Office Automation,
J.3 Life and Medical Sciences},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2014-61/INPROC-2014-61.pdf},
contact = {Steve Strauch, http://www.iaas.uni-stuttgart.de/institut/mitarbeiter/strauch},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {Lack of time or economic difficulties prevent chronic obstructive pulmonary
disease patients from communicating with their physicians, thus inducing
exacerbation of their chronic condition and possible hospitalization. In this
paper we propose a platform that integrates mobile application technologies and
cloud computing to provide regular monitoring of patients and avoidance of
medical emergencies.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-61&engl=1}
}
@inproceedings {INPROC-2014-59,
author = {Laura Kassner and Christoph Gr{\"o}ger and Bernhard Mitschang and Engelbert Westk{\"a}mper},
title = {{Product Life Cycle Analytics - Next Generation Data Analytics on Structured and Unstructured Data}},
booktitle = {Proceedings of the 9th CIRP Conference on Intelligent Computation in Manufacturing Engineering - CIRP ICME '14},
address = {Naples},
publisher = {Elsevier},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--6},
type = {Conference Paper},
month = {July},
year = {2014},
keywords = {analytics, big data, unstructured data, text analytics, product life cycle management, PLM, data warehousing, product life cycle analytics, data integration},
language = {English},
cr-category = {H.3.1 Content Analysis and Indexing,
H.3.4 Information Storage and Retrieval Systems and Software,
J.2 Physical Sciences and Engineering,
J.6 Computer-Aided Engineering},
contact = {Per Mail an laura.kassner@gsame.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Enormous amounts of unstructured data, e. g., emails, failure reports and
customer complaints, are abundant around the product life cycle and provide a
huge potential for analytics-driven optimization. However, existing analytics
approaches on unstructured data are fraught with three major insufficiencies
limiting comprehensive business improvement: (1) they focus on isolated data
sources from a single life cycle phase -- for example, data from a customer
relationship management system are mined for frequent complaints without
considering manufacturing failure reports related to the same product; (2) they
do not make use of structured data for holistic analytics, e. g., to
automatically correlate unstructured failure reports with structured
performance data of a manufacturing execution system; (3) existing
implementations of data integration and analytics components are typically
cost-intensive, manual and case-based, without a general framework.
To address these issues, we present our Product Life Cycle Analytics (PLCA)
approach, a platform and a reference architecture for the holistic integration
and analysis of unstructured and structured data from multiple data sources
around the product life cycle. For this purpose, we survey structured and
unstructured data sources around the product life cycle and discuss limitations
of existing analytics approaches like traditional Business Intelligence
applications. Moreover, we develop use cases for holistic life-cycle-oriented
analytics and give examples based on case study investigations, e. g., for the
holistic analysis of unstructured failure reports in the automotive industry.
On this basis, we discuss technical requirements and components of our
reference architecture, such as a versatile, plug-and-play Natural Language
Processing pipeline and mechanisms for linking structured and unstructured data
in a holistic data warehouse. Finally, we analyse implementation issues and
investigate underlying technologies from the areas of text analytics and data
mining in order to evaluate our architecture with respect to the identified use
cases.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-59&engl=1}
}
@inproceedings {INPROC-2014-58,
author = {Christoph Stach and Bernhard Mitschang},
title = {{Design and Implementation of the Privacy Management Platform}},
booktitle = {Proceedings of the 15th International Conference on Mobile Data Management},
address = {Brisbane},
publisher = {IEEE Computer Society Conference Publishing Services},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--4},
type = {Conference Paper},
month = {July},
year = {2014},
keywords = {Android; policy model; implementation strategies},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
D.4.6 Operating Systems Security and Protection},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Nowadays, mobile platform vendors have to concern themselves increasingly about
how to protect their users' privacy. As Google is less restrictive than their
competitors regarding their terms of use for app developers, it is hardly
surprising that malware spreads even in Google Play. To make matters worse, in
Android every user is responsible for his or her private data and s/he is
frequently overwhelmed with this burden because of the fragile Android
permission mechanism. Thus, the calls for a customizable, fine-grained,
context-based, crash-proof, and intuitive privacy management system are growing
louder. To cope with these requests, we introduce the Privacy Management
Platform (PMP) and we discuss three alternative implementation strategies for
such a system.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-58&engl=1}
}
@inproceedings {INPROC-2014-52,
author = {Peter Reimann and Tim Waizenegger and Matthias Wieland and Holger Schwarz},
title = {{Datenmanagement in der Cloud f{\"u}r den Bereich Simulationen und Wissenschaftliches Rechnen}},
booktitle = {Proceedings des 2. Workshop Data Management in the Cloud auf der 44. Jahrestagung der Gesellschaft f{\"u}r Informatik e.V. (GI)},
editor = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
address = {Stuttgart, Deutschland},
publisher = {Lecture Notes in Informatics (LNI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Workshop Paper},
month = {September},
year = {2014},
language = {German},
cr-category = {H.2.5 Heterogeneous Databases,
H.2.8 Database Applications,
H.4.1 Office Automation},
contact = {Peter Reimann Peter.Reimann@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {F{\"u}r Organisationen, die Simulationen nicht als ihr Kerngesch{\"a}ft verstehen und
sie daher eher sporadisch durchf{\"u}hren, lohnt sich der Betrieb einer eigenen
Recheninfrastruktur nur selten. Dies betrifft z.B. kleine und mittlere
Unternehmen sowie einige wissenschaftliche Institutionen. Besserung k{\"o}nnen
{\"o}ffentliche Cloud-Infrastrukturen als Plattform f{\"u}r die Ausf{\"u}hrung von
Simulationen verschaffen. Das Datenmanagement in der Cloud ist aber speziell
f{\"u}r den Bereich Simulationen noch weitgehend unerforscht. In diesem Beitrag
identifizieren wir daher noch offene Fragestellungen bzgl. des Datenmanagements
von Simulationen in der Cloud. Dies betrifft vor allem die Datenbereitstellung
und inwieweit nutzer- und simulationsspezifische Anforderungen an das
Datenmanagement in der Cloud eingehalten werden k{\"o}nnen. Wir untersuchen
Technologien, welche sich diesen Fragestellungen widmen, und diskutieren, ob
und wie sie in der Cloud sowie f{\"u}r Simulationen einsetzbar sind. Weiterhin
skizzieren wir wichtige zuk{\"u}nftige Forschungsthemen.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-52&engl=1}
}
@inproceedings {INPROC-2014-51,
author = {Peter Reimann and Holger Schwarz},
title = {{Simulation Workflow Design Tailor-Made for Scientists}},
booktitle = {Proceedings of the 26th International Conference on Scientific and Statistical Database Management},
address = {Aalborg, Denmark},
publisher = {ACM},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Demonstration},
month = {June},
year = {2014},
keywords = {Data Provisioning; Data Management Patterns; Simulation Workflow; Simulation Workflow Design},
language = {English},
cr-category = {H.2.5 Heterogeneous Databases,
H.2.8 Database Applications,
H.4.1 Office Automation},
contact = {Peter Reimann Peter.Reimann@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Scientific workflows have to deal with highly heterogeneous data environments.
In particular, they have to carry out complex data provisioning tasks that
filter and transform heterogeneous input data in such a way that underlying
tools or services can ingest them. This results in a high complexity of
workflow design. Scientists often want to design their workflows on their own,
but usually do not have the necessary skills to cope with this complexity.
Therefore, we have developed a pattern-based approach to workflow design,
thereby mainly focusing on workflows that realize numeric simulations. This
approach removes the burden from scientists to specify low-level details of
data provisioning. In this demonstration, we apply a prototype implementation
of our approach to various use cases and show how it makes simulation workflow
design tailor-made for scientists.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-51&engl=1}
}
@inproceedings {INPROC-2014-50,
author = {Peter Reimann and Holger Schwarz and Bernhard Mitschang},
title = {{Data Patterns to Alleviate the Design of Scientific Workflows Exemplified by a Bone Simulation}},
booktitle = {Proceedings of the 26th International Conference on Scientific and Statistical Database Management},
address = {Aalborg, Denmark},
publisher = {ACM},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {June},
year = {2014},
keywords = {Data Provisioning; Data Management Patterns; Workflow; SIMPL; Simulation Workflow; BPEL; WS-BPEL},
language = {English},
cr-category = {H.2.5 Heterogeneous Databases,
H.2.8 Database Applications,
H.4.1 Office Automation},
contact = {Peter Reimann Peter.Reimann@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Scientific workflows often have to process huge data sets in a multiplicity of
data formats. For that purpose, they typically embed complex data provisioning
tasks that transform these heterogeneous data into formats the underlying tools
or services can handle. This results in an increased complexity of workflow
design. As scientists typically design their scientific workflows on their own,
this complexity hinders them to concentrate on their core issue, namely the
experiments, analyses, or simulations they conduct. In this paper, we present
the core idea of a pattern-based approach to alleviate the design of scientific
workflows. This approach is particularly targeted at the needs of scientists.
We exemplify and assess the pattern-based design approach by applying it to a
complex scientific workflow realizing a real-world simulation of structure
changes in bones.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-50&engl=1}
}
@inproceedings {INPROC-2014-49,
author = {Christoph Gr{\"o}ger and Holger Schwarz and Bernhard Mitschang},
title = {{The Deep Data Warehouse. Link-based Integration and Enrichment of Warehouse Data and Unstructured Content}},
booktitle = {Proceedings of the 18th IEEE International Enterprise Distributed Object Computing Conference (EDOC), 01-05 September, 2014, Ulm, Germany},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {September},
year = {2014},
language = {English},
cr-category = {H.2.7 Database Administration},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Data warehouses are at the core of enterprise IT and enable the efficient
storage and analysis of structured data. Besides, unstructured content, e.g.,
emails and documents, constitutes more than half of the entire enterprise data
and contains a lot of implicit knowledge about warehouse entities. Thus,
holistic analytics require the integration of structured warehouse data and
unstructured content to generate novel insights. These insights can also be
used to enrich the integrated data and to create a new basis for further
analytics. Existing integration approaches only support a limited range of
analytical applications and require the costly adaptation of the warehouse
schema. In this paper, we present the Deep Data Warehouse (DeepDWH), a novel
type of data warehouse based on the flexible integration and enrichment of
warehouse data and unstructured content, addressing the variety challenge of
Big Data. It relies on information-rich instance-level links between warehouse
elements and content items, which are represented in a graph-oriented
structure. Neither adaptations of the existing warehouse nor the design of an
overall federated schema are required. We design a conceptual linking model and
develop a logical schema for links based on a property graph. As a proof of
concept, we present a prototypical implementation of the DeepDWH including a
link store based on a graph database.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-49&engl=1}
}
@inproceedings {INPROC-2014-43,
author = {Marcel Clappier and Tobias Helbig and Jan K{\"o}nigsberger and Julia Sch{\"u}tten-Breitenbach and Kamran Taheri},
title = {{A Decision Concept for the Economic Evaluation of Different Recycling Paths in the Dismantling of End-of-Life Vehicles}},
booktitle = {Smart Digital Futures 2014},
editor = {Rui Neves-Silva and George A. Tshirintzis and Vladimir Uskov and Robert J. Howlett and Lakhmi C. Jain},
address = {Amsterdam, Berlin, Tokyo, Washington},
publisher = {IOS Press},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Frontiers in Artificial Intelligence and Applications},
volume = {262},
pages = {38--47},
type = {Conference Paper},
month = {January},
year = {2014},
isbn = {978-1-61499-404-6 (print), 978-1-61499-405-3 (online)},
keywords = {decision concept, end-of-life vehicles, recycling, IT concept, app concept},
language = {English},
cr-category = {H.4.2 Information Systems Applications Types of Systems,
H.5.2 Information Interfaces and Presentation User Interfaces},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2014-43/INPROC-2014-43.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Due to the scarcity of raw materials, the recycling of end-of-life vehicles is
becoming increasingly important. The essential decision for the cycle of
materials is made in the dismantling company which disassembles the vehicle
parts and determines the further recycling path: reuse as a replacement part,
specific substantial exploitation or exploitation by shredding. This decision
is the central aspect of this paper, taking into account the economic aspects,
the uncertainties of the market and the applicability of the method. Therefore
a detailed cost analysis model is presented, including a method for prediction
of the replacement part market. In addition an IT concept is presented to
visualize the result of the analysis and support the decision making.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-43&engl=1}
}
@inproceedings {INPROC-2014-36,
author = {Uwe Breitenb{\"u}cher and Tobias Binz and Oliver Kopp and Frank Leymann and Matthias Wieland},
title = {{Context-aware Cloud Application Management}},
booktitle = {Proceedings of the 4th International Conference on Cloud Computing and Services Science (CLOSER 2014)},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {499--509},
type = {Conference Paper},
month = {April},
year = {2014},
keywords = {Application Management; Context; Automation; Cloud Computing},
language = {English},
cr-category = {K.6 Management of Computing and Information Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {The automation of application management is one of the most important issues in
Cloud Computing. However, the steadily increasing number of different services
and software components employed in composite Cloud applications leads to a
higher risk of unexpected side effects when different technologies work
together that bring their own proprietary management APIs. Due to unknown
dependencies and the increasing diversity and heterogeneity of employed
technologies, even small management tasks on single components may compromise
the whole application functionality for reasons that are neither expected nor
obvious to non-experts. In this paper, we tackle these issues by introducing a
method that enables detecting and correcting unintended effects of management
tasks in advance by analyzing the context in which tasks are executed. We
validate the method practically and show how context-aware expert management
knowledge can be applied fully automatically to running Cloud applications.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-36&engl=1}
}
@inproceedings {INPROC-2014-28,
author = {Christoph Gr{\"o}ger and Holger Schwarz and Bernhard Mitschang},
title = {{Prescriptive Analytics for Recommendation-based Business Process Optimization}},
booktitle = {Proceedings of the 17th International Conference on Business Information Systems (BIS), 22-23 May, 2014, Larnaca, Cyprus},
editor = {Witold Abramowicz and Angelika Kokkinaki},
publisher = {Springer},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Business Information Processing},
volume = {176},
pages = {25--37},
type = {Conference Paper},
month = {May},
year = {2014},
keywords = {Prescriptive Analytics, Process Optimization, Process Warehouse, Data Mining, Business Intelligence, Decision Support},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Continuously improved business processes are a central success factor for
companies. Yet, existing data analytics do not fully exploit the data generated
during process execution. Particularly, they miss prescriptive techniques to
transform analysis results into improvement actions. In this paper, we present
the data-mining-driven concept of recommendation-based business process
optimization on top of a holistic process warehouse. It prescriptively
generates action recommendations during process execution to avoid a predicted
metric deviation. We discuss data mining techniques and data structures for
real-time prediction and recommendation generation and present a proof of
concept based on a prototypical implementation in manufacturing.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-28&engl=1}
}
@inproceedings {INPROC-2014-25,
author = {Uwe Breitenb{\"u}cher and Tobias Binz and Oliver Kopp and Frank Leymann},
title = {{Vinothek - A Self-Service Portal for TOSCA}},
booktitle = {Proceedings of the 6th Central-European Workshop on Services and their Composition (ZEUS 2014)},
editor = {Nico Herzberg and Matthias Kunze},
publisher = {CEUR-WS.org},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {CEUR Workshop Proceedings},
volume = {1140},
pages = {69--72},
type = {Demonstration},
month = {March},
year = {2014},
issn = {1613-0073},
language = {English},
cr-category = {H.4.1 Office Automation},
ee = {http://ceur-ws.org/Vol-1140/,
http://www.zeus-workshop.eu/},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {The TOSCA standard provides a means to describe Cloud applications and their
management in a portable way. TOSCA-based applications can be deployed on
various standard-compliant TOSCA Runtimes. Vinothek is a Web-based Self-Service
Portal that hides the technical details of TOSCA Runtimes and provides end
users a simple graphical interface to provision Cloud applications on demand.
This demonstration shows how Vinothek supports automated provisioning of
applications and how it facilitates integrating TOSCA Runtimes.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-25&engl=1}
}
@inproceedings {INPROC-2014-17,
author = {Sebastian Wagner and Oliver Kopp and Frank Leymann},
title = {{Choreography-based Consolidation of Multi-Instance BPEL Processes}},
booktitle = {Proceedings of the 4th International Conference on Cloud Computing and Services Science (CLOSER 2014); Barcelona, Spain, April 3-5, 2014.},
editor = {SciTePress},
address = {Barcelona},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--12},
type = {Conference Paper},
month = {April},
year = {2014},
keywords = {BPEL; Choreography; Process Consolidation; Multi-Instance Interactions},
language = {English},
cr-category = {H.4.1 Office Automation},
contact = {sebastian.wagner@iaas.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {Interaction behavior between processes of different organizational units such
as an enterprise and its suppliers can be modeled by choreographies. When
organizations decide, for instance, to gain more control about their suppliers
to minimize transaction costs, they may decide to insource these companies.
This especially includes the integration of the partner processes into the
organization's processes. Existing works are able to merge single-instance BPEL
process interactions where each process model is only instantiated once during
choreography execution. However, there exist different interaction scenarios
where one process interacts with several instances of another process and where
the number of instances involved is not known at design time but determined
during runtime of the choreography. In this work we investigate these
interaction scenarios and extend the process consolidation approach in a way
that we can emulate the multi-instance interaction scenarios in the merged
process model.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-17&engl=1}
}
@inproceedings {INPROC-2014-14,
author = {Eva Hoos and Christoph Gr{\"o}ger and Stefan Kramer and Bernhard Mitschang},
title = {{Improving Business Processes through Mobile Apps - An Analysis Framework to Identify Value-added App Usage Scenarios}},
booktitle = {Proceedings of the 16th International Conference on Enterprise Information Systems (ICEIS), 27-30 April, 2014, Lisbon, Portugal},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {April},
year = {2014},
keywords = {Business Processes; Analysis Framework; Mobile Application},
language = {English},
cr-category = {H.1.1 Systems and Information Theory,
K.6.1 Project and People Management},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Mobile apps offer new possibilities to improve business processes. However, the
introduction of mobile apps is typically carried out from a technology point of
view. Hence, process improvement from a business point of view is not
guaranteed. There is a methodological lack for a holistic analysis of business
processes regarding mobile technology. For this purpose, we present an analysis
framework, which comprises a systematic methodology to identify value-added
usage scenarios of mobile technology in business processes with a special focus
on mobile apps. The framework is based on multi-criteria analysis and portfolio
analysis techniques and it is evaluated in a case-oriented investigation in
the automotive industry.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-14&engl=1}
}
@inproceedings {INPROC-2014-10,
author = {Christoph Gr{\"o}ger and Holger Schwarz and Bernhard Mitschang},
title = {{The Manufacturing Knowledge Repository. Consolidating Knowledge to Enable Holistic Process Knowledge Management in Manufacturing}},
booktitle = {Proceedings of the 16th International Conference on Enterprise Information Systems (ICEIS), 27-30 April, 2014, Lisbon, Portugal},
publisher = {SciTePress},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {April},
year = {2014},
language = {English},
cr-category = {H.2.7 Database Administration,
J.1 Administration Data Processing},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The manufacturing industry is faced with strong competition making the
companies' knowledge resources and their systematic management a critical
success factor. Yet, existing concepts for the management of process knowledge
in manufacturing are characterized by major shortcomings. Particularly, they
are either exclusively based on structured knowledge, e. g., formal rules, or
on unstructured knowledge, such as documents, and they focus on isolated
aspects of manufacturing processes. To address these issues, we present the
Manufacturing Knowledge Repository, a holistic repository that consolidates
structured and unstructured process knowledge to facilitate knowledge
management and process optimization in manufacturing. First, we define
requirements, especially the types of knowledge to be handled, e. g., data
mining models and text documents. Next, we develop a conceptual repository data
model associating knowledge items and process components such as machines and
process steps. Furthermore, we discuss implementation issues including storage
architecture variants and present both an evaluation of the data model and a
proof of concept based on a prototypical implementation.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-10&engl=1}
}
@inproceedings {INPROC-2014-09,
author = {Christoph Gr{\"o}ger and Christoph Stach},
title = {{The Mobile Manufacturing Dashboard}},
booktitle = {Proceedings of the 2014 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops), 24-28 March, 2014, Budapest, Hungary},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Demonstration},
month = {March},
year = {2014},
language = {English},
cr-category = {J.1 Administration Data Processing},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Real-time monitoring and analysis of manufacturing processes are critical
success factors in the smart factory. While there is a variety of data
analytics tools for process optimization, almost each of these applications is
designed for desktop PCs and focuses on selected process aspects, only. I. e.,
there is a gap between the site the analysis outcomes occur (the management
level) and the site where an immediate reaction to these results is required
(the factory shop floor). Even worse, there is no mobile, holistic and
analytics-based information provisioning tool for workers and production
supervisors on the shop floor but rudimentary systems designed for limited
application areas, only. Therefore, we introduce our Mobile Manufacturing
Dashboard (MMD), a situation-aware manufacturing dashboard for mobile devices.
The MMD provides advanced analytics and addresses the full range of
process-oriented information needs of both shop floor workers and production
supervisors. In this paper, we give a brief overview of the MMD's major
architecture and implementation aspects and describe two representative
real-world scenarios for the MMD. These characteristic scenarios target shop
floor workers and production supervisors and illustrate situation-aware
information provisioning in the smart factory.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2014-09&engl=1}
}
@inproceedings {INPROC-2013-50,
author = {Tobias Binz and Uwe Breitenb{\"u}cher and Oliver Kopp and Frank Leymann},
title = {{Automated Discovery and Maintenance of Enterprise Topology Graphs}},
booktitle = {Proceedings of the 6th IEEE International Conference on Service Oriented Computing \& Applications (SOCA 2013)},
publisher = {IEEE Computer Society Conference Publishing Services},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {126--134},
type = {Conference Paper},
month = {December},
year = {2013},
doi = {10.1109/SOCA.2013.29},
keywords = {Discovery; Maintenance; Enterprise Topology Graph; Enterprise IT; Crawling},
language = {English},
cr-category = {K.6 Management of Computing and Information Systems,
D.2.12 Software Engineering Interoperability},
contact = {Tobias Binz, http://www.iaas.uni-stuttgart.de/institut/mitarbeiter/binz},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {Enterprise Topology Graphs (ETGs) represent a snapshot of the complete
enterprise IT, including all its applications, processes, services, components,
and their dependencies. In the past, ETGs have been applied in analysis,
optimization, and adaptation of enterprise IT. But how to discover and maintain
a complete, accurate, fresh, and fine-grained Enterprise Topology Graph?
Existing approaches either do not provide enough technical details or do not
cover the complete scope of Enterprise Topology Graphs. Although existing tools
are able to discover valuable information, there is no means for seamless
integration. This paper proposes a plugin-based approach and extensible
framework for automated discovery and maintenance of Enterprise Topology
Graphs. The approach is able to integrate various kinds of tools and techniques
into a unified model. We implemented the proposed approach in a prototype and
applied it to different scenarios. Due to the vital role of discovery plugins
in our approach, we support plugin development with a systematic testing method
and discuss the lessons we learned. The results presented in this paper enable
new ways of enterprise IT optimization, analysis, and adaptation. Furthermore,
they unlock the full potential of past research, which previously required
manual modeling of ETGs.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-50&engl=1}
}
@inproceedings {INPROC-2013-49,
author = {Uwe Breitenb{\"u}cher and Tobias Binz and Oliver Kopp and Frank Leymann and Johannes Wettinger},
title = {{Integrated Cloud Application Provisioning: Interconnecting Service-Centric and Script-Centric Management Technologies}},
booktitle = {Proceedings of the 21st International Conference on Cooperative Information Systems (CoopIS 2013)},
address = {Stuttgart},
publisher = {Springer Berlin Heidelberg},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Computer Science},
volume = {8185},
pages = {130--148},
type = {Conference Paper},
month = {September},
year = {2013},
isbn = {978-3-642-41029-1},
doi = {10.1007/978-3-642-41030-7_9},
keywords = {Cloud Application Provisioning; Integration; Management Scripts; Management Services},
language = {English},
cr-category = {K.6 Management of Computing and Information Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {Modern Cloud applications employ a plethora of components and XaaS offerings
that need to be configured during provisioning. Due to increased heterogeneity,
complexity is growing and existing approaches reach their limits if multiple
different provisioning and configuration technologies are involved. They are
not able to integrate them in an automated, flexible, and customizable way.
Especially combining proprietary management services with script-centric
configuration management technologies is currently a major challenge. To enable
automated provisioning of such applications, we introduce Generic Lifecycle
Management Planlets that provide a means to combine custom provisioning logic
with common provisioning tasks. We implemented planlets for provisioning and
customization of components and XaaS offerings based on both SOAP and RESTful
Web services as well as configuration management technologies such as Chef to
show the feasibility of the approach. By using our approach, multiple
technologies can be combined seamlessly.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-49&engl=1}
}
@inproceedings {INPROC-2013-48,
author = {Uwe Breitenb{\"u}cher and Tobias Binz and Oliver Kopp and Frank Leymann and Matthias Wieland},
title = {{Policy-Aware Provisioning of Cloud Applications}},
booktitle = {SECURWARE 2013, The Seventh International Conference on Emerging Security Information, Systems and Technologies},
publisher = {Xpert Publishing Services},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {86--95},
type = {Conference Paper},
month = {August},
year = {2013},
isbn = {978-1-61208-298-1},
keywords = {Cloud Applications; Provisioning; Security; Policies},
language = {English},
cr-category = {K.6 Management of Computing and Information Systems},
ee = {http://www.thinkmind.org/index.php?view=article&articleid=securware_2013_4_40_30149},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {The automated provisioning of complex composite Cloud applications is a major
issue and of vital importance in Cloud computing. It is key to enable Cloud
properties such as pay-as-you-go pricing, on-demand self-service, and
elasticity. The functional aspects of provisioning such as instantiating
virtual machines or installing software components are covered by several
technologies on different technical levels: some are targeted to a pretty high
level such as Amazon's Cloud Formation, some deal with deep technical issues
based on scripts such as Chef or Puppet. However, the currently available
solutions are tightly coupled to individual technologies without being able to
consider non-functional security requirements in a non-proprietary and
interoperable way. In this paper, we present a concept and framework extension
enabling the integration of heterogeneous provisioning technologies under
compliance with non-functional aspects defined by policies.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-48&engl=1}
}
@inproceedings {INPROC-2013-46,
author = {Oliver Kopp and Tobias Binz and Uwe Breitenb{\"u}cher and Frank Leymann},
title = {{Winery - A Modeling Tool for TOSCA-based Cloud Applications}},
booktitle = {Proceedings of 11th International Conference on Service-Oriented Computing (ICSOC'13)},
publisher = {Springer Berlin Heidelberg},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {LNCS},
volume = {8274},
pages = {700--704},
type = {Demonstration},
month = {December},
year = {2013},
doi = {10.1007/978-3-642-45005-1_64},
keywords = {Cloud Applications; Modeling; TOSCA; Management; Portability},
language = {English},
cr-category = {K.1 The Computer Industry,
K.6.4 System Management,
D.2.12 Software Engineering Interoperability},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {TOSCA is a new OASIS standard to describe composite applications and their
management. The structure of an application is described by a topology, whereas
management plans describe the application's management functionalities, e.g.,
provisioning or migration. Winery is a tool offering an HTML5-based environment
for graph-based modeling of application topologies and defining reusable
component and relationship types. Thereby, it uses TOSCA as internal storage,
import, and export format. This demonstration shows how Winery supports
modeling of TOSCA-based applications. We use the school management software
Moodle as running example throughout the paper.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-46&engl=1}
}
@inproceedings {INPROC-2013-45,
author = {Tobias Binz and Uwe Breitenb{\"u}cher and Florian Haupt and Oliver Kopp and Frank Leymann and Alexander Nowak and Sebastian Wagner},
title = {{OpenTOSCA - A Runtime for TOSCA-based Cloud Applications}},
booktitle = {Proceedings of 11th International Conference on Service-Oriented Computing (ICSOC'13)},
publisher = {Springer Berlin Heidelberg},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {LNCS},
volume = {8274},
pages = {692--695},
type = {Demonstration},
month = {December},
year = {2013},
doi = {10.1007/978-3-642-45005-1_62},
keywords = {TOSCA; Cloud Applications; Automation; Management; Portability},
language = {English},
cr-category = {K.1 The Computer Industry,
K.6.4 System Management,
D.2.12 Software Engineering Interoperability},
contact = {Tobias Binz, http://www.iaas.uni-stuttgart.de/institut/mitarbeiter/binz},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {TOSCA is a new standard facilitating platform independent description of Cloud
applications. OpenTOSCA is a runtime for TOSCA-based Cloud applications. The
runtime enables fully automated plan-based deployment and management of
applications defined in the OASIS TOSCA packaging format CSAR. This paper
outlines the core concepts of TOSCA and provides a system overview on OpenTOSCA
by describing its modular and extensible architecture, as well as presenting
our prototypical implementation. We demonstrate the use of OpenTOSCA by
deploying and instantiating the school management and learning application
Moodle.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-45&engl=1}
}
@inproceedings {INPROC-2013-44,
author = {Tim Waizenegger and Matthias Wieland and Tobias Binz and Uwe Breitenb{\"u}cher and Frank Leymann},
title = {{Towards a Policy-Framework for the Deployment and Management of Cloud Services}},
booktitle = {SECURWARE 2013, The Seventh International Conference on Emerging Security Information, Systems and Technologies},
editor = {Hans-Joachim Hof and Carla Westphall},
address = {Barcelona, Spain},
publisher = {IARIA},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {14--18},
type = {Conference Paper},
month = {August},
year = {2013},
isbn = {978-1-61208-298-1},
keywords = {Cloud Computing; Security; Policy-Framework; TOSCA; Cloud Service; Cloud Management},
language = {English},
cr-category = {D.2.7 Software Engineering Distribution, Maintenance, and Enhancement,
D.2.9 Software Engineering Management,
D.2.13 Software Engineering Reusable Software},
contact = {tim.waizenegger@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {As the adoption of Cloud Computing is growing, the automated deployment of
cloud-based systems is becoming more and more important. New standards, such as
TOSCA (OASIS), allow the modeling of interoperable Cloud services. It is now
possible to build reusable and portable cloud services that can be (semi-)
automatically deployed by different cloud-deployment-engines at various Cloud
environments. However, there is still an acceptance problem among potential
users, especially in the enterprise segment, that stems from security issues
like data security. To improve security in automatic Cloud management engines,
this paper proposes a framework for processing non-functional requirements of
Cloud services.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-44&engl=1}
}
@inproceedings {INPROC-2013-43,
author = {Tim Waizenegger and Matthias Wieland and Tobias Binz and Uwe Breitenb{\"u}cher and Florian Haupt and Oliver Kopp and Frank Leymann and Bernhard Mitschang and Alexander Nowak and Sebastian Wagner},
title = {{Policy4TOSCA: A Policy-Aware Cloud Service Provisioning Approach to Enable Secure Cloud Computing}},
booktitle = {On the Move to Meaningful Internet Systems: OTM 2013 Conferences},
editor = {Robert Meersman and Herve Panetto and Tharam Dillon and Johann Eder and Zohra Bellahsene and Norbert Ritter and Pieter De Leenheer and Dou Deijing},
address = {Heidelberg},
publisher = {Springer Berlin Heidelberg},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Computer Science (LNCS)},
volume = {8185},
pages = {360--376},
type = {Conference Paper},
month = {September},
year = {2013},
isbn = {978-3-642-41029-1},
doi = {10.1007/978-3-642-41030-7_26},
keywords = {Cloud Computing, TOSCA, Cloud Service, Cloud Management, Policy-Framework, Security, Green-IT, Sustainable Cloud Service},
language = {English},
cr-category = {D.2.7 Software Engineering Distribution, Maintenance, and Enhancement,
D.2.9 Software Engineering Management,
D.2.13 Software Engineering Reusable Software},
contact = {tim.waizenegger@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {With the growing adoption of Cloud Computing, automated deployment and
provisioning systems for Cloud applications are becoming more prevalent. They
help to reduce the onboarding costs for new customers as well as the financial
impact of managing Cloud Services by automating these previously manual tasks.
With the widespread use of such systems, the adoption of a common standard for
describing Cloud applications will provide a crucial advantage by enabling
reusable and portable applications. TOSCA, a newly published standard by OASIS
with broad industry participation provides this opportunity. Besides the
technical requirements of running and managing applications in the cloud,
non-functional requirements, like cost, security, and environmental issues, are
of special importance when moving towards the automated provisioning and
management of Cloud applications. In this paper we demonstrate how
non-functional requirements are defined in TOSCA using policies. We propose a
mechanism for automatic processing of these formal policy definitions in a
TOSCA runtime environment that we have developed based on the proposed
architecture of the TOSCA primer. In order to evaluate our approach, we present
prototypical implementations of security policies for encrypting databases and
for limiting the geographical location of the Cloud servers. We demonstrate how
our runtime environment is ensuring these policies and show how they affect the
deployment of the application.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-43&engl=1}
}
@inproceedings {INPROC-2013-38,
author = {Stefan Silcher and Jan K{\"o}nigsberger and Peter Reimann and Bernhard Mitschang},
title = {{Cooperative service registries for the service-based Product Lifecycle Management architecture}},
booktitle = {Proceedings of the 17th IEEE International Conference on Computer Supported Cooperative Work in Design (CSCWD '13)},
publisher = {IEEE Xplore},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {439--446},
type = {Conference Paper},
month = {June},
year = {2013},
isbn = {978-1-4673-6083-8},
doi = {10.1109/CSCWD.2013.6581003},
keywords = {Collaborative Product Lifecycle Management; Cooperative Service Registries; Enterprise Service Bus; Service-oriented Architecture},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
D.2.13 Software Engineering Reusable Software,
H.3.4 Information Storage and Retrieval Systems and Software,
J.1 Administration Data Processing},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Product Lifecycle Management (PLM) comprises many different tasks across
multiple domains, such as product development and production. Thus,
multidisciplinary engineering teams have to collaborate to successfully design
and produce products. Nowadays, engineers are supported with many software
solutions, which are tailored to the work of each engineer. The problem is the
missing or bad integration between these IT solutions, which leads to
noncontinuous processes and an insufficient cooperation. The Service-oriented
Architecture (SOA) supports the needed flexible integration of applications
based on services and moreover an automation and integration of processes via
workflows. In previous work, we proposed a service-oriented PLM architecture
that provides these benefits and supports continuous processes. Thereby,
services of different domains and phases of the product life cycle need to
collaborate in a distributed fashion. In this paper, we systematically
identify, define and rate representative models for the management of
corresponding distributed service registries, which enable an efficient
collaboration of services. Based on a prototypical implementation of the
best-rated model in a layout redesign scenario, we assess our approach for its
suitability in PLM. The selected service registry model provides transparent
access to all services of different domains and shows the ease of integrating
new applications into the product life cycle. It thereby enables an improved
cooperation of engineers across various domains to define cross-domain
processes.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-38&engl=1}
}
@inproceedings {INPROC-2013-34,
author = {Stefan Silcher and Barbara Seeberg and Erich Zahn and Bernhard Mitschang},
title = {{A Holistic Management Model for Manufacturing Companies and Related IT Support}},
booktitle = {Procedia CIRP},
publisher = {CIRP},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {175--180},
type = {Conference Paper},
month = {July},
year = {2013},
issn = {2212-8271},
doi = {10.1016/j.procir.2013.05.030},
keywords = {Product Lifecycle Management, Supply Chain Management, Factory Lifecycle Management, Holistic Management Model, IT Integration, Service-oriented Architecture, Enterprise Service Bus},
language = {English},
cr-category = {C.1.3 Processor Architectures, Other Architecture Styles,
J.1 Administration Data Processing},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Life cycle and management concepts are a necessity to compete in
current turbulent markets. Small- and medium-sized enterprises (SME) struggle
when realizing such concepts and accordant IT support. In this paper we
review different concepts and their similarities and differences are discussed.
We focus on Product Lifecycle Management (PLM), Supply Chain Management and
Factory Lifecycle Management to integrate them into a holistic management
model. Subsequently, we extend a service-based PLM architecture to support
the holistic management model to continuously support processes. The usage of
standardized technologies allows companies, and especially SMEs, to implement
this architecture with low costs and effort.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-34&engl=1}
}
@inproceedings {INPROC-2013-32,
author = {Carlos L{\"u}bbe and Bernhard Mitschang},
title = {{Holistic Load-Balancing in a Distributed Spatial Cache}},
booktitle = {Proceedings of the 2013 IEEE 14th International Conference on Mobile Data Management},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {267--270},
type = {Conference Paper},
month = {June},
year = {2013},
doi = {10.1109/MDM.2013.38},
keywords = {Caching; Geographic Information Systems; Peer-to-peer},
language = {English},
cr-category = {H.2 Database Management},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2013-32/INPROC-2013-32.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {A steadily growing number of people using location based services (LBS) inflict
massive query loads on the data tier of an LBS. As such queries usually possess
considerable overlap, multiple cache nodes collaborating in a distributed
spatial cache can provide scalable access to frequently used data. To preserve
high throughput throughout the complete execution process, it is necessary to
balance the accumulating load among the participating cache nodes. In this
work, we identify three key-indicators to improve resource utilization during
the load-balancing process: data skew, anticipated data access patterns and
dynamic load peaks. For this reason, we introduce a comprehensive mathematical
model to express the key-indicators as probability distribution functions. We
fuse the different key-indicators into a single holistic distribution model. In
the course of this, we devise a methodology from our holistic distribution
model towards a distributed spatial cache offering improved load-balancing.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-32&engl=1}
}
@inproceedings {INPROC-2013-24,
author = {Christoph Stach},
title = {{Wie funktioniert Datenschutz auf Mobilplattformen?}},
booktitle = {Informatik 2013: Informatik angepasst an Mensch, Organisation und Umwelt, Tagungsband der 43. Jahrestagung der Gesellschaft f{\"u}r Informatik e.V. (GI), 16.09. - 20.09.2013, Universit{\"a}t Koblenz-Landau},
editor = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
publisher = {Springer-Verlag},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Informatics},
pages = {1--15},
type = {Workshop Paper},
month = {September},
year = {2013},
keywords = {Datenschutz; Mobilplattform; Berechtigungssystem; Privacy Management Platform},
language = {German},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
D.4.6 Operating Systems Security and Protection},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Die wachsende Verbreitung von mobilen Ger{\"a}ten, bei denen einerseits sehr viele
Kontextdaten und andererseits zahlreich pers{\"o}nliche Informationen anfallen,
macht diese zu einem hervorragenden Ziel f{\"u}r Angriffe auf die Privatsph{\"a}re.
Versch{\"a}rft wird diese Situation dadurch, dass jede Mobilplattform eine eigene
Strategie zum Schutz dieser Daten verfolgt, allerdings ohne dabei den Nutzer in
geeignetem Ma{\ss}e mit einzubeziehen. Aus diesem Grund gibt es eine Vielzahl an
Erweiterungen f{\"u}r diese Berechtigungssysteme. F{\"u}r den Nutzer bleibt dabei
weiterhin die Frage, f{\"u}r welche Plattform und f{\"u}r welches Berechtigungssysteme
er sich entscheiden soll.
In diesem Papier besch{\"a}ftigen wir uns daher mit den Eigenschaften aktueller
Mobilplattformen und einiger aktueller Berechtigungssysteme. Wir stellen mit
der Privacy Management Platform (PMP) unser eigenes Berechtigungssystem vor,
das sich stark an den Bed{\"u}rfnissen der Nutzer orientiert. Neben dem Modell f{\"u}r
die Berechtigungsrichtlinien hat allerdings auch die Art, wie diese Modell in
die Mobilplattform eingebunden wird, entscheidenden Einfluss auf die
Eigenschaften des Systems. Durch die Gegen{\"u}berstellung dieser Eigenschaften,
wird dem Nutzer die Wahl einer Plattform und eines Berechtigungssystems, das
seinen Bed{\"u}rfnissen gen{\"u}gt, erleichtert.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-24&engl=1}
}
@inproceedings {INPROC-2013-15,
author = {Christoph Gr{\"o}ger and Mark Hillmann and Friedemann Hahn and Bernhard Mitschang and Engelbert Westk{\"a}mper},
title = {{The Operational Process Dashboard for Manufacturing}},
booktitle = {Proceedings of the 46th CIRP Conference on Manufacturing Systems (CMS2013), 29--31 May, 2013, Sesimbra, Portugal},
publisher = {Elsevier},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = may,
year = {2013},
language = {English},
cr-category = {J.1 Administration Data Processing},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Agility is a critical success factor for manufacturers in today's volatile
global environment and requires employees monitoring their performance and
reacting quickly to turbulences. Thus, comprehensive information provisioning
on all hierarchy levels is necessary. Yet, existing IT systems, e. g.,
Manufacturing Execution Systems, scarcely address information needs of workers
on the shop floor level. This causes uncoordinated waiting times, inflexibility
and costly communication. To address these issues, we present the Operational
Process Dashboard for Manufacturing (OPDM), a mobile dashboard for shop floor
workers. We identify process-oriented information needs, develop technical
dashboard services and define IT requirements for an implementation.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-15&engl=1}
}
@inproceedings {INPROC-2013-14,
author = {Christoph Gr{\"o}ger and Stefan Silcher and Engelbert Westk{\"a}mper and Bernhard Mitschang},
title = {{Leveraging Apps in Manufacturing. A Framework for App Technology in the Enterprise}},
booktitle = {Proceedings of the 46th CIRP Conference on Manufacturing Systems (CMS2013), 29--31 May, 2013, Sesimbra, Portugal},
publisher = {Elsevier},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = may,
year = {2013},
language = {English},
cr-category = {J.1 Administration Data Processing},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Apps on mobile devices like smartphones have become the core of the digital
life of consumers. Apps are used, e. g., for shopping or communicating in
social networks. Recently, apps are gaining more and more attention in
enterprises as enabler for agile process optimization. In this article, we
discuss the potentials and challenges of exploiting this technology with a
focus on the manufacturing industry. We come up with a framework for apps in
manufacturing companies and identify major areas that need further
investigations to fully leverage apps. Moreover, we present existing and novel
apps across the product life cycle.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-14&engl=1}
}
@inproceedings {INPROC-2013-13,
author = {Christoph Stach},
title = {{How to Assure Privacy on Android Phones and Devices?}},
booktitle = {Proceedings of the 14th International Conference on Mobile Data Management},
publisher = {IEEE Computer Society Conference Publishing Services},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--3},
type = {Demonstration},
month = jun,
year = {2013},
keywords = {privacy management; Android; demonstrator},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
D.4.6 Operating Systems Security and Protection},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {There is an increasing popularity of mobile devices-especially Android
devices-particularly because of the huge amount of available third-party
applications. Albeit, the number of diagnosed hacker attacks against mobile
user increased in unison, as these devices became the prime target of the
latest malware, thanks to inexperienced users and a negligent way of dealing
with private data. To make matters worse, the Android permission system is much
too coarse-grained and too hard to grasp for an average user. However, even if
a user is able to comprehend the meaning and impact of a certain permission, in
the end, s/he must grant all requested permission anyhow, if s/he wants to
install the application.
Therefore, we introduce PMP a privacy management platform for Android, which
enables a user to grant an application fine-grained access rights to critical
data. Furthermore, those rights can depend on some contextual constraints (e.g.
Internet usage is restricted to certain locations) and the policy rules can be
modified at run-time. Depending upon the granted rights an application provides
a different scope of service. Moreover, the user is-due to a catchy GUI-always
informed what impact the granting or respectively the revocation of a
permission has on the application's service quality.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-13&engl=1}
}
@inproceedings {INPROC-2013-12,
author = {Tim Waizenegger and Oliver Schiller and Cataldo Mega},
title = {{Datensicherheit in mandantenf{\"a}higen Cloud Umgebungen}},
booktitle = {Tagungsband: Proceedings der 15. Fachtagung des GI-Fachbereichs ``Datenbanken und Informationssysteme'' (DBIS)},
editor = {{Gesellschaft f{\"u}r Informatik (GI)}},
address = {Magdeburg},
publisher = {Gesellschaft f{\"u}r Informatik (GI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Series of the Gesellschaft f{\"u}r Informatik (GI)},
pages = {477--489},
type = {Conference Paper},
month = mar,
year = {2013},
isbn = {978-3-88579-608-4},
keywords = {Cloud; Cloud Computing; Security; Mandantenf{\"a}hig; Schl{\"u}sselverwaltung; Key Management},
language = {German},
cr-category = {D.4.6 Operating Systems Security and Protection,
E.3 Data Encryption},
contact = {tim.waizenegger@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Cloud Computing wird aktuell haupts{\"a}chlich f{\"u}r wissenschaftliches Rechnen und
endkundenorientierte Dienste verwendet, da die Kostenersparnis hier ein
besonders wichtiger Faktor ist. Die Betreiber von Cloud Plattformen sind jedoch
immer st{\"a}rker daran interessiert Cloud Dienste auch im Enterprise Segment
anzubieten, um hier gleicherma{\ss}en von Kostenvorteilen zu profitieren.
Die Kundenresonanz aus diesem Segment l{\"a}sst jedoch zu w{\"u}nschen {\"u}brig. Die
Gr{\"u}nde daf{\"u}r sind Bedenken bez{\"u}glich Datensicherheit und -vertraulichkeit in
mandantenf{\"a}higen Systemen. Um diesem Problem zu begegnen, haben wir die
Herausforderungen bei der Absicherung von mandantenf{\"a}higen Cloud Diensten
untersucht, und den Umgang mit vertraulichem Schl{\"u}sselmaterial und
Anmeldedaten als Schwachstelle identifiziert.
Dieser Beitrag zeigt eine konzeptionelle L{\"o}sung zur zentralen Ablage und
Zugriffsverwaltung sensibler Daten, sowie deren prototypische Implementierung
innerhalb der IBM Cloud L{\"o}sung SmartCloud Content Management.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-12&engl=1}
}
@inproceedings {INPROC-2013-11,
author = {Christoph Stach and Bernhard Mitschang},
title = {{Privacy Management for Mobile Platforms - A Review of Concepts and Approaches}},
booktitle = {Proceedings of the 14th International Conference on Mobile Data Management},
publisher = {IEEE Computer Society Conference Publishing Services},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--9},
type = {Conference Paper},
month = jun,
year = {2013},
keywords = {privacy; profound overview; permission model},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
D.4.6 Operating Systems Security and Protection},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The still rising popularity of modern mobile phones results in an increased
demand for manifold applications for these devices. As Android OS supports the
development and usage of third-party software greatly, there are more and more
developers for this platform. However, many of those applications handle
private data grossly negligent which immediately leads to serious privacy
concerns. To make matters worse, the current Android permission rules are much
too coarse and incomprehensible from the average user's perspective. But even
if s/he understands the meaning of the permissions, s/he must either accept all
of them or waive the application.
Therefore we review concepts and approaches towards effective privacy
management for mobile platforms. All this is discussed based on the prevailing
key players in the mobile market, namely Apple, RIM, Microsoft and Google. As
this work has been initiated by Google we mainly concentrated on Android-based
concepts towards customizable privacy management approaches. As a result of our
review and taking into account current initiatives and trends in the market, we
come up with a novel approach, an implementation architecture and a prototype.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-11&engl=1}
}
@inproceedings {INPROC-2013-02,
author = {Peter Reimann and Holger Schwarz},
title = {{Datenmanagementpatterns in Simulationsworkflows}},
booktitle = {Proceedings der 15. GI-Fachtagung Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2013)},
editor = {{Gesellschaft f{\"u}r Informatik (GI)}},
address = {Magdeburg},
publisher = {Gesellschaft f{\"u}r Informatik (GI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Informatics (LNI)},
pages = {279--293},
type = {Conference Paper},
month = mar,
year = {2013},
keywords = {Datenbereitstellung; Datenmanagementpatterns; Workflow; SIMPL; Simulationsworkflow; BPEL; WS-BPEL},
language = {German},
cr-category = {H.2.5 Heterogeneous Databases,
H.2.8 Database Applications,
H.4.1 Office Automation},
contact = {Peter Reimann Peter.Reimann@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Simulationsworkflows m{\"u}ssen oftmals gro{\ss}e Datenmengen verarbeiten, die in einer
Vielzahl propriet{\"a}rer Formate vorliegen. Damit diese Daten von den im Workflow
eingebundenen Programmen und Diensten verarbeitet werden k{\"o}nnen, m{\"u}ssen sie in
passende Formate transformiert werden. Dies erh{\"o}ht die Komplexit{\"a}t der
Workflowmodellierung, welche i.d.R. durch die Wissenschaftler selbst erfolgt.
Dadurch k{\"o}nnen sich diese weniger auf den Kern der eigentlichen Simulation
konzentrieren. Zur Behebung dieses Defizits schlagen wir einen Ansatz vor, mit
dem die Aktivit{\"a}ten zur Datenbereitstellung in Simulationsabl{\"a}ufen abstrakt
modelliert werden k{\"o}nnen. Wissenschaftler sollen keine Implementierungsdetails,
sondern lediglich die Kernaspekte der Datenbereitstellung in Form von Patterns
beschreiben. Die Spezifikation der Patterns soll dabei m{\"o}glichst in der Sprache
der mathematischen Simulationsmodelle erfolgen, mit denen Wissenschaftler
vertraut sind. Eine Erweiterung des Workflowsystems bildet die Patterns
automatisch auf ausf{\"u}hrbare Workflowfragmente ab, welche die
Datenbereitstellung umsetzen. Dies alles reduziert die Komplexit{\"a}t der
Modellierung von Simulationsworkflows und erh{\"o}ht die Produktivit{\"a}t der
Wissenschaftler.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2013-02&engl=1}
}
@inproceedings {INPROC-2012-38,
author = {Carlos L{\"u}bbe and Nazario Cipriani},
title = {{SimPl: A Simulation Platform for Elastic Load-Balancing in a Distributed Spatial Cache Overlay}},
booktitle = {Proc. of the 13th International Conference on Mobile Data Management (MDM)},
address = {Washington, DC, USA},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {0--4},
type = {Conference Paper},
month = jul,
year = {2012},
keywords = {load-balancing; simulation; spatial data; peer-to-peer},
language = {English},
cr-category = {H.2.4 Database Management Systems},
contact = {carlos.luebbe@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Location-based services (LBS) have to cope with increasing query loads at their
data tier. Yet, the data access patterns of LBS typically possess spatial
locality. Therefore, a dedicated spatial cache which provides efficient access
to the data currently needed may considerably reduce this load. To ensure high
throughput throughout the entire execution, previous work introduced an elastic
load-balancing mechanism for multiple cache nodes that collaborate in a
distributed spatial cache overlay. However, calibrating such a load-balancing
mechanism is a non-trivial task, as several parameters influence such a system.
We demonstrate a simulation platform (SimPl) for elastic load-balancing. SimPl
enables a network administrator to set up several overlay topologies and
calibrate their system parameters using different spatial data access patterns.
A live visualization of the simulated overlay enables intuitive comparison of
overlay topologies and their load-balancing abilities.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2012-38&engl=1}
}
@inproceedings {INPROC-2012-37,
author = {Carlos L{\"u}bbe and Anja Reuter and Bernhard Mitschang},
title = {{Elastic Load-Balancing in a Distributed Spatial Cache Overlay}},
booktitle = {Proc. of the 13th International Conference on Mobile Data Management (MDM)},
address = {Washington, DC, USA},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {0--10},
type = {Conference Paper},
month = jul,
year = {2012},
keywords = {load-balancing; caching; spatial data; peer-to-peer},
language = {English},
cr-category = {H.2.4 Database Management Systems},
contact = {carlos.luebbe@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Location-based services (LBS) have gained enormous popularity, which imposes
increasing query loads at the data tier of an LBS. Yet, the data access
patterns of LBS typically possess high temporal and spatial locality.
Therefore, a dedicated spatial cache which provides efficient access to the
data currently needed may considerably reduce this load. To ensure high
throughput, multiple cache nodes can collaborate in a distributed spatial cache
overlay, which balances load among the nodes. However, load-balancing is a
non-trivial task in this context, as load spreads unevenly in space and varies
notably over time. This requires constant readjustment to shifting hot spots.
We present an elastic load-balancing mechanism between cache nodes that is
based on the physical model of a particle-spring system. Using spring
contraction, nodes instantly form processing clusters in regions with high load
and thus can easily work off accumulating queries. Our evaluation shows that
our approach quickly adapts to rapidly changing hot spots and thereby ensures
high throughput throughout the entire execution.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2012-37&engl=1}
}
@inproceedings {INPROC-2012-36,
author = {Nazario Cipriani and Christoph Stach and Oliver D{\"o}rler and Bernhard Mitschang},
title = {{NexusDSS - A System for Security Compliant Processing of Data Streams}},
booktitle = {Proceedings of the First International Conference on Data Technologies and Applications (DATA 2012)},
publisher = {SciTePress Digital Library},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--11},
type = {Conference Paper},
month = jul,
year = {2012},
language = {English},
cr-category = {C.2.0 Computer-Communication Networks, General,
K.6.5 Security and Protection,
D.4.6 Operating Systems Security and Protection},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Technological advances in microelectronic and communication technology are
increasingly leading to a highly connected environment equipped with sensors
producing a continuous flow of context data. The steadily growing number of
sensory context data available enables new application scenarios and drives new
processing techniques. The growing pervasion of everyday life with social media
and the possibility of interconnecting them with moving objects' traces,
leads to a growing importance of access control for this kind of data since it
concerns privacy issues. The challenge is twofold: First mechanisms to control
data access and data usage must be established and second efficient and
flexible processing of sensible data must be supported. In this paper we
present a flexible and extensible security framework which provides mechanisms
to enforce requirements for context data access and beyond that support safe
processing of sensible context data according to predefined processing rules.
In addition and in contrast to previous concepts, our security framework
especially supports fine-grained control to contextual data.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2012-36&engl=1}
}
@inproceedings {INPROC-2012-31,
author = {Christoph Gr{\"o}ger and Johannes Schlaudraff and Florian Niedermann and Bernhard Mitschang},
title = {{Warehousing Manufacturing Data. A Holistic Process Warehouse for Advanced Manufacturing Analytics}},
booktitle = {Proceedings of the 14th International Conference on Data Warehousing and Knowledge Discovery - DaWaK 2012},
editor = {Alfredo Cuzzocrea and Umeshwar Dayal},
address = {Berlin, Heidelberg},
publisher = {Springer},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Computer Science},
volume = {7448},
pages = {142--155},
type = {Conference Paper},
month = sep,
year = {2012},
keywords = {Data Warehouse; Manufacturing; Process Optimization; Analytics; Business Intelligence; Data Integration},
language = {English},
cr-category = {H.2.7 Database Administration,
J.1 Administration Data Processing},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Strong competition in the manufacturing industry makes efficient and effective
manufacturing processes a critical success factor. However, existing
warehousing and analytics approaches in manufacturing are coined by substantial
shortcomings, significantly preventing comprehensive process improvement.
Especially, they miss a holistic data base integrating operational and process
data, e. g., from Manufacturing Execution and Enterprise Resource Planning
systems. To address this challenge, we introduce the Manufacturing Warehouse, a
concept for a holistic manufacturing-specific process warehouse as central part
of the overall Advanced Manufacturing Analytics Platform. We define a
manufacturing process meta model and deduce a universal warehouse model. In
addition, we develop a procedure for its instantiation and the integration of
concrete source data. Finally, we describe a first proof of concept based on a
prototypical implementation.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2012-31&engl=1}
}
@inproceedings {INPROC-2012-28,
author = {Stefan Silcher and Max Dinkelmann and Jorge Minguez and Bernhard Mitschang},
title = {{A Service-based Integration for an improved Product Lifecycle Management}},
booktitle = {Proceedings of the 14th International Conference on Enterprise Information Systems},
editor = {Alfredo Cuzzocrea and Jos{\'e} Cordeiro and Leszek Maciaszek},
address = {Wroc{\l}aw, Poland},
publisher = {INSTICC Press},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {38--47},
type = {Conference Paper},
month = jun,
year = {2012},
isbn = {978-989-8565-10-5},
keywords = {Product Lifecycle Management; Service-oriented Architecture; Modular IT Integration},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
D.2.13 Software Engineering Reusable Software},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The continuously changing environment is nowadays a major challenge for
companies. The tough competition, growing customization of products and
environmental regulations forces companies to continuously adapt their business
processes. In order to manage the complexity and reduce the effort for
developing products and production, many IT systems are indispensable. Despite
Product Lifecycle Management Technology (PLM) the growing heterogeneous IT
landscapes lack of a continuous support for business processes and get quickly
unmanageable. In this paper PLM technology is extended by a service-based
integration approach. Therefore, a modular service-based architecture was
developed which will be presented in detail. The architecture describes how the
whole product life cycle can be integrated more efficiently. The
characteristics and findings of our approach are presented as well as a first
prototype covering the production planning.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2012-28&engl=1}
}
@inproceedings {INPROC-2012-15,
author = {Christoph Gr{\"o}ger and Florian Niedermann and Bernhard Mitschang},
title = {{Data Mining-driven Manufacturing Process Optimization}},
booktitle = {Proceedings of the World Congress on Engineering 2012 Vol III, WCE 2012, 4--6 July, 2012, London, U.K.},
editor = {S. I. Ao and L. Gelman and D. W. L. Hukins and A. Hunter and A. M. Korsunsky},
publisher = {Newswood},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1475--1481},
type = {Conference Paper},
month = jul,
year = {2012},
isbn = {978-988-19252-2-0},
keywords = {Analytics; Data Mining; Decision Support; Process Optimization},
language = {English},
cr-category = {H.2.8 Database Applications,
J.1 Administration Data Processing},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {High competitive pressure in the global manufacturing industry makes efficient,
effective and continuously improved manufacturing processes a critical success
factor. Yet, existing analytics in manufacturing, e. g., provided by
Manufacturing Execution Systems, are coined by major shortcomings considerably
limiting continuous process improvement. In particular, they do not make use of
data mining to identify hidden patterns in manufacturing-related data. In this
article, we present indication-based and pattern-based manufacturing process
optimization as novel data mining approaches provided by the Advanced
Manufacturing Analytics Platform. We demonstrate their usefulness through use
cases and depict suitable data mining techniques as well as implementation
details.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2012-15&engl=1}
}
@inproceedings {INPROC-2012-14,
author = {Christoph Gr{\"o}ger and Florian Niedermann and Holger Schwarz and Bernhard Mitschang},
title = {{Supporting Manufacturing Design by Analytics. Continuous Collaborative Process Improvement enabled by the Advanced Manufacturing Analytics Platform}},
booktitle = {Proceedings of the 2012 16th IEEE International Conference on Computer Supported Cooperative Work in Design (CSCWD), May 23-25, 2012, Wuhan, China},
editor = {Liang Gao and Weiming Shen and Jean-Paul Barth{\`e}s and Junzhou Luo and Jianming Yong and Wenfeng Li and Weidong Li},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {793--799},
type = {Conference Paper},
month = may,
year = {2012},
isbn = {978-1-4673-1210-3},
keywords = {Analytics; Data Mining; Process Management; Manufacturing; Process Optimization},
language = {English},
cr-category = {H.2.8 Database Applications,
J.1 Administration Data Processing},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The manufacturing industry is faced with global competition making efficient,
effective and continuously improved manufacturing processes a critical success
factor. Yet, media discontinuities, the use of isolated analysis methods on
local data sets as well as missing means for sharing analysis results cause a
collaborative gap in Manufacturing Process Management that prohibits continuous
process improvement. To address this challenge, this paper proposes the
Advanced Manufacturing Analytics (AdMA) Platform that bridges the gap by
integrating operational and process manufacturing data, defining a repository
for analysis results and providing indication-based and pattern-based
optimization techniques. Both the conceptual architecture underlying the
platform as well as its current implementation are presented in this paper.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2012-14&engl=1}
}
@inproceedings {INPROC-2012-11,
author = {Christoph Stach and Luiz Fernando Schlindwein},
title = {{Candy Castle - A Prototype for Pervasive Health Games}},
booktitle = {Proceedings of the 2012 IEEE International Conference on Pervasive Computing and Communications Workshops (PERCOM Workshops); Lugano, Switzerland, March 19-23, 2012},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--4},
type = {Demonstration},
month = mar,
year = {2012},
keywords = {pervasive application; health game; health education; data analysis},
language = {English},
cr-category = {K.4 Computers and Society},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Serious games have primarily the function to educate and train, and secondly to
entertain. We present a serious game used for the treatment of diabetes and for
teaching a diabetes-aware lifestyle. The game encourages the player to walk
around in his / her surrounding and check the blood sugar level in as many
different places as possible. Undoubtedly, the special feature of Candy Castle
is our feedback loop which can be used for on-the-fly data analysis and
automatic adaptation of the application. Thus, the patients as well as their
doctors can be automatically alerted if their blood values deteriorate.
In this demo paper, we explain both the design of the game as well as some
interesting implementation aspects of our prototype. Even though we concern
just on the topic of mobile games, all introduced techniques can be transferred
to a general setting and used in any other mobile application.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2012-11&engl=1}
}
@inproceedings {INPROC-2011-87,
author = {Jorge Minguez and Florian Niedermann and Bernhard Mitschang},
title = {{A provenance-aware service repository for EAI process modeling tools}},
booktitle = {IEEE International Conference on Information Reuse and Integration 2011 (IRI '11)},
address = {Las Vegas},
publisher = {IEEE Press},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {42--47},
type = {Conference Paper},
month = aug,
year = {2011},
doi = {10.1109/IRI.2011.6009518},
keywords = {EAI process modeling tool; business process; business service; data interoperability; enterprise application integration; functional interoperability; manufacturing domain; process lifecycle management; provenance aware service repository; provenance data model; provenance subscription capabilities; service engineering methods; service knowledge base; service reusability; business data processing; knowledge based systems; manufacturing industries; open systems},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
D.2.13 Software Engineering Reusable Software},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {One of the major challenges for Enterprise Application Integration (EAI)
process modeling tools is the continuous adaptation of the business processes
and services. Business and IT specialists are both confronted with a number of
problems involved in the adaptation of such processes, such as the lack of
support for process lifecycle management, data and functional interoperability
problems or the appropriate service knowledge base. Currently, most service
engineering methods adopt a lifecycle strategy for the design, implementation,
deployment and evaluation of services. However, enterprises exploiting service
reusability lack the knowledge on process dependencies across the entire
service lifecycle. This knowledge is required by process modeling tools in
order to keep EAI processes loosely-coupled. Using a provenance data model we
describe the different types of service dependencies in EAI processes with
regard to the service changes across its lifecycle. We present a
provenance-aware service repository with provenance subscription capabilities
and its adoption for different use cases in the manufacturing domain.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-87&engl=1}
}
@inproceedings {INPROC-2011-86,
author = {Nazario Cipriani and Oliver Schiller and Bernhard Mitschang},
title = {{M-TOP: Multi-target Operator Placement of Query Graphs for Data Streams}},
booktitle = {Proceedings of the 15th International Database Engineering and Applications Symposium (IDEAS 2011)},
publisher = {ACM},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {52--60},
type = {Conference Paper},
month = {September},
year = {2011},
language = {English},
cr-category = {G.1.6 Numerical Analysis Optimization,
C.2.3 Network Operations},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Nowadays, many applications process stream-based data, such as financial
market analysis, network intrusion detection, or visualization applications. To
process stream-based data in an application-independent manner, distributed
stream processing systems emerged. They typically translate a query to an
operator graph, place the operators to stream processing nodes, and execute
them to process the streamed data. The operator placement is crucial in such
systems, as it deeply influences query execution. Often, different stream-based
applications require dedicated placement of query graphs according to their
specific objectives, e.g. bandwidth not less than 500 MBit/s and costs not more
than 1 cost unit. This fact constrains operator placement. Existing approaches
do not take into account application-specific objectives, thus not reflecting
application-specific placement decisions. As objectives might conflict among
each other, operator placement is subject to delicate trade-offs, such as
bandwidth maximization is more important than cost reduction. Thus, the
challenge is to find a solution which considers the application-specific
objectives and their trade-offs.
We present M-TOP, a QoS-aware multi-target operator placement framework for
data stream systems. Particularly, we propose an operator placement strategy
considering application-specific targets consisting of objectives, their
respective trade-offs specifications, bottleneck conditions, and ranking
schemes to compute a suitable placement. We integrated M-TOP into NexusDS, our
distributed data stream processing middleware, and provide an experimental
evaluation to show the effectiveness of M-TOP.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-86&engl=1}
}
@inproceedings {INPROC-2011-85,
author = {Nazario Cipriani and Matthias Grossmann and Harald Sanftmann and Bernhard Mitschang},
title = {{Design Considerations of a Flexible Data Stream Processing Middleware}},
booktitle = {Proceedings of the 15th East-European Conference on Advances in Databases and Information Systems (ADBIS 2011)},
publisher = {CEUR-WS.org},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {222--231},
type = {Conference Paper},
month = {September},
year = {2011},
language = {English},
cr-category = {K.6.1 Project and People Management},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Techniques for efficient and distributed processing of huge, unbound data
streams have made some impact in the database community. Distributed data
stream processing systems have emerged providing a distributed environment to
process these potentially unbound streams of data by a set of processing nodes.
A wide range of realtime applications process stream-based data. Sensors and
data sources, such as position data of moving objects, continuously produce
data that is consumed by, e.g., location-aware applications. Depending on the
domain of interest, the processing of such data often depends on
domain-specific functionality. For instance, an application which visualizes
stream-based data has stringent timing constraints, or may even need a specific
hardware environment to smoothly process the data. Furthermore, users may add
additional constraints. E.g., for security reasons they may want to restrict
the set of nodes that participates in processing.
In this paper we review context-aware applications which, despite their
different application fields, share common data processing principles. We
analyse these applications and extract common requirements which data stream
processing systems must meet to support these applications. Finally, we show
how such applications are implemented using NexusDS, our extensible stream
processing middleware.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-85&engl=1}
}
@inproceedings {INPROC-2011-64,
author = {Michael Abel and Peter Klemm and Stefan Silcher and Jorge Minguez},
title = {{Start-Up of Reconfigurable Production Machines with a Service-Oriented Architecture}},
booktitle = {Proceedings of the 21st International Conference on Production Research},
publisher = {Fraunhofer IAO},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--5},
type = {Conference Paper},
month = {August},
year = {2011},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Starting-up production machines takes a considerable part of time and
development expenses. Especially in the case of reconfigurable machines a short
start-up phase is essential. Many activities, which are necessary during the
start-up, can be automated. These are the configuration of mechatronic modules,
control- and fieldbus systems as well as the extensive testing of functions.
This paper presents an approach based on a service-oriented architecture (SOA)
to automate the start-up of a reconfigurable production machine. Functionality
for configuration and start-up is provided by an internal middleware system.
The sequence control for the start-up process is realised within a configuration
system. A new approach to the combination of field-bus and middleware
communication infrastructure is used to adopt SOA paradigms to existing
automation technology. Thus, real-time communication can be combined with
flexible communication for the automatic configuration of production machines.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-64&engl=1}
}
@inproceedings {INPROC-2011-57,
author = {Andreas Brodt and Oliver Schiller and Bernhard Mitschang},
title = {{Efficient resource attribute retrieval in RDF triple stores}},
booktitle = {Proceedings of the 20th ACM conference on Information and knowledge management (CIKM)},
publisher = {Association for Computing Machinery (ACM)},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
type = {Conference Paper},
month = {October},
year = {2011},
keywords = {RDF; SPARQL},
language = {English},
cr-category = {H.2.4 Database Management Systems,
H.2.2 Database Management Physical Design},
contact = {andreas.brodt@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The W3C Resource Description Framework (RDF) is gaining popularity for its
ability to manage semi-structured data without a predefined database schema. So
far, most RDF query processors have concentrated on finding complex graph
patterns in RDF, which typically involves a high number of joins. This works
very well to query resources by the relations between them. Yet, obtaining a
record-like view on the attributes of resources, as natively supported by
RDBMS, imposes unnecessary performance burdens, as the individual attributes
must be joined to assemble the final result records.
We present an approach to retrieve the attributes of resources efficiently. We
first determine the resources in question and then retrieve all their
attributes efficiently at once, exploiting contiguous storage in RDF indexes.
In addition, we present an index structure which is specifically designed for
RDF attribute retrieval. In a performance evaluation we show that our approach
is clearly superior for larger numbers of retrieved attributes.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-57&engl=1}
}
@inproceedings {INPROC-2011-42,
author = {Jorge Minguez and Peter Reimann and Sema Zor},
title = {{Event-driven Business Process Management in Engineer-to-Order Supply Chains}},
booktitle = {Proceedings of the 15th International Conference on Computer Supported Cooperative Work in Design},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--8},
type = {Conference Paper},
month = {June},
year = {2011},
keywords = {Event-driven Architecture; Service-oriented Architecture; SOA; EDA; Engineer-to-Order; ETO; Supply chain},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
D.2.13 Software Engineering Reusable Software},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {Integration efforts in today's manufacturing environments tend to enable
service-based communication interfaces between enterprise and manufacturing
systems. Constantly changing business conditions demand a high level of
flexibility in business processes as well as an adaptive and fully
interoperable IT infrastructure. The principles of reusability and
loosely-coupled services have driven Service Oriented Architecture (SOA) to
become the most used paradigm for software design at the business level. In a
manufacturing environment, event-driven architectures (EDA) are often employed
for managing information flows across different production systems. The timely
propagation of business-relevant events is a fundamental requirement in
Engineer-to-Order (ETO) enterprises, which require a high level of transparency
in their supply chains. Agility is one of the top priorities for ETO
manufacturers in order to react to turbulent scenarios. Therefore, the main
challenge for ETO supply chains is to identify and propagate events across the
ETO logistics network and integrate these into the manufacturer business
processes. We present how an existing service-oriented integration platform for
manufacturing can be used to fill the gap between EDA-based manufacturing
environments of an ETO supply chain and SOA-based manufacturer business
processes. In this paper, we discuss the benefits of the Business Process
Execution Language (BPEL) as vehicle for this integration. The adoption of BPEL
will enable an efficient and effective reaction to turbulent manufacturing
scenarios in an ETO supply chain.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-42&engl=1}
}
@inproceedings {INPROC-2011-41,
author = {Jorge Minguez and David Baureis and Donald Neumann},
title = {{Providing Coordination and Goal Definition in Product-Service Systems through Service-oriented Computing}},
booktitle = {Proceedings of the 44th CIRP International Conference on Manufacturing Systems},
publisher = {CIRP},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {7--12},
type = {Conference Paper},
month = {June},
year = {2011},
keywords = {Manufacturing; Product-Service Systems; PSS; Service-oriented Architecture; SOA},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
D.2.13 Software Engineering Reusable Software},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Product-Service Systems (PSS) are a strategic approach that offers
manufacturing companies the possibility of long-term differentiation against
competitors by integrating goods and services. The implementation of a PSS
entails challenges for the resulting supply chain structure and the IT
infrastructure supporting coordinated service offerings, such as conflicting
goals and coordination in the integrated business processes. The
Service-oriented Architecture (SOA) paradigm, based on loosely-coupled
components, provides rapid reconfiguration of business processes, rapid
integration of services and goal definition through service level agreements.
This paper presents a PSS service analysis methodology, which supports
coordination and definition of goals in heterogeneous supply chains.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-41&engl=1}
}
% Reviewed: fields follow this file's conventions (semicolon-separated keywords,
% spelled-out month in braces, LaTeX-escaped umlauts); no changes needed.
@inproceedings {INPROC-2011-40,
author = {Jorge Minguez and Stefan Silcher and Bernhard Mitschang and Engelbert Westk{\"a}mper},
title = {{Towards Intelligent Manufacturing: Equipping SOA-based Architectures with advanced SLM Services}},
booktitle = {Proceedings of the 44th CIRP International Conference on Manufacturing Systems},
publisher = {CIRP},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--8},
type = {Conference Paper},
month = {June},
year = {2011},
keywords = {Service Oriented Architecture; Manufacturing; Service Lifecycle Management; SOA; SLM; Adaptability; Wandlungsf{\"a}higkeit},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
D.2.13 Software Engineering Reusable Software},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The vision of knowledge-based and intelligent manufacturing systems is driving
the development of system architectures, which can seamlessly manage
information flows across multiple heterogeneous manufacturing systems and
provide the necessary services to support the execution of production
processes. Constantly changing business conditions and turbulent scenarios
force manufacturing companies to continuously adapt their business processes
and manufacturing systems. In such a context, a flexible infrastructure that
supports the full integration of processes and adapts its services is needed.
This paper presents an innovative semantic service framework that enables the
adoption of service lifecycle management (SLM) in an SOA-based integration
framework.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-40&engl=1}
}
% NOTE(review): publisher = {Online} looks like a placeholder rather than an actual
% publisher -- confirm the real publisher of the ICMS '11 proceedings and replace.
% NOTE(review): first keyword "Information" may be a truncated phrase -- verify
% against the original keyword list.
@inproceedings {INPROC-2011-39,
author = {Stefan Silcher and Jorge Minguez and Bernhard Mitschang},
title = {{Adopting the Manufacturing Service Bus in a Service-based Product Lifecycle Management Architecture}},
booktitle = {Proceedings of the 44th International CIRP Conference on Manufacturing Systems: ICMS '11; Madison, Wisconsin, USA, May 31 - June 3, 2011},
publisher = {Online},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--6},
type = {Conference Paper},
month = {June},
year = {2011},
keywords = {Information; System Architecture; Product Lifecycle Management; Service Oriented Architecture; Enterprise Service Bus},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
D.2.13 Software Engineering Reusable Software},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Service-oriented computing is nowadays a rising technology to implement
business processes in an efficient and flexible manner. This technology has a
great impact on manufacturing environments. The realization of Product
Lifecycle Management (PLM) with a Service Oriented Architecture (SOA) has many
benefits. Some advantages are a seamless and flexible integration of all
applications within PLM, including legacy systems, improved data provisioning
and a reduced complexity by using a common service-based integration
middleware, such as the Manufacturing Service Bus (MSB). In this paper the
integration of the MSB into the service-oriented PLM approach will be described
in detail.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-39&engl=1}
}
@inproceedings {INPROC-2011-37,
author = {Sylvia Radesch{\"u}tz and Marko Vrhovnik and Holger Schwarz and Bernhard Mitschang},
title = {{Exploiting the Symbiotic Aspects of Process and Operational Data for Optimizing Business Processes}},
booktitle = {Proc. of the 12th IEEE International Conference on Information Reuse and Integration (IRI 2011)},
address = {Las Vegas, USA},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--6},
type = {Conference Paper},
month = {August},
year = {2011},
language = {English},
cr-category = {H.2.4 Database Management Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {A profound analysis of all relevant business data in a company is necessary for
optimizing business processes effectively. Current analyses typically run
either on business process execution data or on operational business data.
Correlations among the separate data sets have to be found manually under big
effort. However, to achieve a more informative analysis and to fully optimize a
company's business, an efficient consolidation of all major data sources is
indispensable. Recent matching algorithms are insufficient for this task since
they are restricted either to schema or to process matching. We present a new
matching framework to combine process data models and operational data models
(semi-)automatically for performing such a profound business analysis. We
describe this approach and its basic matching rules as well as an experimental
study that shows the achieved high recall and precision.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-37&engl=1}
}
@inproceedings {INPROC-2011-36,
author = {Oliver Schiller and Benjamin Schiller and Andreas Brodt and Bernhard Mitschang},
title = {{Native support of multi-tenancy in RDBMS for software as a service}},
booktitle = {EDBT},
publisher = {ACM},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {117--128},
type = {Conference Paper},
month = {January},
year = {2011},
language = {English},
cr-category = {H.2.1 Database Management Logical Design,
H.2 Database Management},
ee = {http://doi.acm.org/10.1145/1951365.1951382},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Software as a Service (SaaS) facilitates acquiring a huge number of small
tenants by providing low service fees. To achieve low service fees, it is
essential to reduce costs per tenant. For this, consolidating multiple tenants
onto a single relational schema instance turned out beneficial because of low
overheads per tenant and scalable manageability. This approach implements
data isolation between tenants, per-tenant schema extension and further
tenant-centric data management features in application logic. This is complex,
disables some optimization opportunities in the RDBMS and represents a
conceptual misstep with Separation of Concerns in mind. Therefore, we
contribute first features of an RDBMS to support tenant-aware data management
natively. We introduce tenants as first-class database objects and propose the
concept of a tenant context to isolate a tenant from other tenants. We
present a schema inheritance concept that allows sharing a core application
schema among tenants while enabling schema extensions per tenant. Finally, we
evaluate a preliminary implementation of our approach.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-36&engl=1}
}
@inproceedings {INPROC-2011-27,
author = {Nazario Cipriani and Carlos L{\"u}bbe and Oliver D{\"o}rler},
title = {{NexusDSEditor - Integrated Tool Support for the Data Stream Processing Middleware NexusDS}},
booktitle = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW), 14. Fachtagung des GI-Fachbereichs ``Datenbanken und Informationssysteme'' (DBIS), 2.-4.3.2011 in Kaiserslautern, Germany},
editor = {Theo H{\"a}rder and Wolfgang Lehner and Bernhard Mitschang and Harald Sch{\"o}ning and Holger Schwarz},
publisher = {Gesellschaft f{\"u}r Informatik (GI)},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
series = {Lecture Notes in Informatics (LNI)},
volume = {180},
pages = {714--717},
type = {Conference Paper},
month = {March},
year = {2011},
isbn = {978-3-88579-274-1},
language = {English},
cr-category = {D.2.6 Software Engineering Programming Environments,
H.5.2 Information Interfaces and Presentation User Interfaces},
contact = {nazario.cipriani@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In this paper we present NexusDSEditor --- an integrated tool for the stream
processing middleware NexusDS. NexusDSEditor is an extension module for the
NexusEditor and supports developers with designing new streaming applications
by providing an integrated tool for orchestrating stream query graphs, define
the deployment of query graph fragments to execution nodes, and analyzing data
streams. In this paper we demonstrate these single steps and show how
NexusDSEditor supports developing streaming data applications for the NexusDS
platform by hiding complexity and providing an intuitive user interface.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-27&engl=1}
}
@inproceedings {INPROC-2011-26,
author = {Florian Niedermann and Sylvia Radesch{\"u}tz and Bernhard Mitschang},
title = {{Design-Time Process Optimization through Optimization Patterns and Process Model Matching}},
booktitle = {Proceedings of the 12th IEEE Conference on Commerce and Enterprise Computing (CEC)},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {48--55},
type = {Conference Paper},
month = {November},
year = {2011},
keywords = {Business Process Analytics; Business Process Design; Business Process Management; Business Process Optimization; Process Model Matching},
language = {English},
cr-category = {H.4.1 Office Automation},
ee = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=5708392},
contact = {florian.niedermann@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The goal of process design is the construction of a process model that is a
priori optimal w.r.t. the goal(s) of the business owning the process. Process
design is therefore a major factor in determining the process performance and
ultimately the success of a business. Despite this importance, the designed
process is often less than optimal. This is due to two major challenges: First,
since the design is an a priori ability, no actual execution data is available
to provide the foundations for design decisions. Second, since modeling
decision support is typically basic at best, the quality of the design largely
depends on the ability of business analysts to make the ``right'' design choices.
To address these challenges, we present in this paper our deep Business
Optimization Platform that enables (semi-) automated process optimization
during process design based on actual execution data. Our platform achieves
this task by matching new processes to existing processes stored in a
repository based on similarity metrics and by using a set of formalized
best-practice process optimization patterns.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-26&engl=1}
}
@inproceedings {INPROC-2011-25,
author = {Florian Niedermann and Holger Schwarz},
title = {{Deep Business Optimization: Making Business Process Optimization Theory Work in Practice}},
booktitle = {Proceedings of the Conference on Business Process Modeling, Development and Support (BPMDS 2011)},
publisher = {Springer},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--10},
type = {Conference Paper},
month = {June},
year = {2011},
keywords = {Business Process Optimization; Optimization Techniques; Business Process Analytics; Data Mining; Tool Support},
language = {English},
cr-category = {H.4.1 Office Automation},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2011-25/INPROC-2011-25.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The success of most of today's businesses is tied to the efficiency and
effectiveness of their core processes. This importance has been recognized in
research, leading to a wealth of sophisticated process optimization and
analysis techniques. Their use in practice is, however, often limited as both
the selection and the application of the appropriate techniques are challenging
tasks. Hence, many techniques are not considered causing potentially
significant opportunities of improvement not to be implemented. This paper
proposes an approach to addressing this challenge using our deep Business
Optimization Platform. By integrating a catalogue of formalized optimization
techniques with data analysis and integration capabilities, it assists analysts
both with the selection and the application of the most fitting optimization
techniques for their specific situation. The paper presents both the concepts
underlying this platform as well as its prototypical implementation.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-25&engl=1}
}
@inproceedings {INPROC-2011-24,
author = {Florian Niedermann and Bernhard Maier and Sylvia Radesch{\"u}tz and Holger Schwarz and Bernhard Mitschang},
title = {{Automated Process Decision Making based on Integrated Source Data}},
booktitle = {Proceedings of the 14th International Conference on Business Information Systems (BIS 2011)},
editor = {Witold Abramowicz},
publisher = {Springer},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Business Information Processing},
pages = {1--10},
type = {Conference Paper},
month = {June},
year = {2011},
keywords = {Data Mining; Decision Automation; Data Integration; Business Process Management; Data-driven Processes},
language = {English},
cr-category = {H.4.1 Office Automation,
H.2.8 Database Applications,
H.5.2 Information Interfaces and Presentation User Interfaces},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The success of most of today's businesses is tied to the efficiency and
effectiveness of their core processes. Yet, two major challenges often prevent
optimal processes: First, the analysis techniques applied during the
optimization are inadequate and fail to include all relevant data sources.
Second, the success depends on the abilities of the individual analysts to spot
the right designs amongst a plethora of choices. Our deep Business Optimization
Platform addresses these challenges through specialized data integration,
analysis and optimization facilities. In this paper, we focus on how it uses
formalized process optimization patterns for detecting and implementing process
improvements.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-24&engl=1}
}
% NOTE(review): the abstract of this entry is identical to that of INPROC-2011-24
% ("Automated Process Decision Making based on Integrated Source Data") although the
% titles differ -- verify the correct abstract is attached to this entry.
@inproceedings {INPROC-2011-23,
author = {Florian Niedermann and Sylvia Radesch{\"u}tz and Bernhard Mitschang},
title = {{Business Process Optimization using Formalized Optimization Patterns}},
booktitle = {Proceedings of the 14th International Conference on Business Information Systems (BIS 2011)},
editor = {Witold Abramowicz},
publisher = {Springer},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--10},
type = {Conference Paper},
month = {June},
year = {2011},
keywords = {Business Process Management; Business Process Optimization},
language = {English},
cr-category = {H.4.1 Office Automation},
contact = {florian.niedermann@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The success of most of today's businesses is tied to the efficiency and
effectiveness of their core processes. Yet, two major challenges often prevent
optimal processes: First, the analysis techniques applied during the
optimization are inadequate and fail to include all relevant data sources.
Second, the success depends on the abilities of the individual analysts to spot
the right designs amongst a plethora of choices. Our deep Business Optimization
Platform addresses these challenges through specialized data integration,
analysis and optimization facilities. In this paper, we focus on how it uses
formalized process optimization patterns for detecting and implementing process
improvements.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-23&engl=1}
}
@inproceedings {INPROC-2011-20,
author = {Carlos L{\"u}bbe and Andreas Brodt and Nazario Cipriani and Matthias Gro{\ss}mann and Bernhard Mitschang},
title = {{DiSCO: A Distributed Semantic Cache Overlay for Location-based Services}},
booktitle = {Proceedings of the 2011 Twelfth International Conference on Mobile Data Management},
address = {Washington, DC, USA},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {1--10},
type = {Conference Paper},
month = {January},
year = {2011},
keywords = {peer-to-peer; semantic caching},
language = {English},
cr-category = {C.2.4 Distributed Systems,
H.2.4 Database Management Systems},
contact = {carlos.luebbe@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Location-based services (LBS) have gained tremendous popularity with millions
of simultaneous users daily. LBS handle very large data volumes and face
enormous query loads. Both the data and the queries possess high locality:
spatial data is distributed very unevenly around the globe, query load is
different throughout the day, and users often search for similar things in the
same places. This causes high load peaks at the data tier of LBS, which may
seriously degrade performance. To cope with these load peaks, we present DiSCO,
a distributed semantic cache overlay for LBS. DiSCO exploits the spatial,
temporal and semantic locality in the queries of LBS and distributes frequently
accessed data over many nodes. Based on the Content-Addressable Network (CAN)
peer-to-peer approach, DiSCO achieves high scalability by partitioning data
using spatial proximity. Our evaluation shows that DiSCO significantly reduces
queries to the underlying data tier.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-20&engl=1}
}
@inproceedings {INPROC-2011-17,
author = {Christoph Stach and Andreas Brodt},
title = {{vHike -- A Dynamic Ride-sharing Service for Smartphones}},
booktitle = {Proceedings of the 12th international conference on Mobile data management},
address = {Lule{\aa}, Sweden},
publisher = {ACM},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--4},
type = {Conference Paper},
month = {June},
year = {2011},
keywords = {ride-sharing; trust; security; location-based.},
language = {English},
cr-category = {K.4 Computers and Society},
contact = {Send an e-mail to christoph.stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In times of lacking natural resources and increasing environmental pollution at
the same time, modern resource efficient kinds of personal transportation have
to be considered. Ride-sharing is maybe one of the most economical ways to
avouch permanent mobility without losing too much comfort. However, especially
dynamic ride-sharing is laden with many resistances including a lack of
security and a heavy scheduling and coordinating burden. Hence this paper
introduces an implementation of a system for dynamic ride-sharing called vHike
which should eliminate these barriers. With our demonstrator every interested
participant may test whether or not such a system can be viable and effective.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-17&engl=1}
}
@inproceedings {INPROC-2011-16,
author = {Andreas Brodt and Oliver Schiller and Sailesh Sathish and Bernhard Mitschang},
title = {{A mobile data management architecture for interoperability of resource and context data}},
booktitle = {Proceedings of the 2011 Twelfth International Conference on Mobile Data Management},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {1--6},
type = {Conference Paper},
month = {June},
year = {2011},
language = {English},
cr-category = {H.3.4 Information Storage and Retrieval Systems and Software,
H.3.5 Online Information Services,
H.2.4 Database Management Systems},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2011-16/INPROC-2011-16.pdf},
contact = {brodt@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Mobile devices have become general-purpose computers that are equipped with
sensors, constantly access the internet, and almost always accompany the user.
Consequently, devices manage many different kinds of data about the user's life
and context. There is considerable overlap in this data, as different
applications handle similar data domains. Applications often keep this data in
separated data silos. Web applications, which manage large amounts of personal
data, hardly share this data with other applications at all. This lack of
interoperability creates redundancy and impacts usability of mobile devices. We
present a data management architecture for mobile devices to support
interoperability between applications, devices and web applications at the data
management level. We propose a central on-device repository for applications to
share resource and context data in an integrated, extensible data model which
uses semantic web technologies and supports location data. A web browser
interface shares data with web applications, as controlled by a general
security model.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-16&engl=1}
}
@inproceedings {INPROC-2011-07,
author = {Peter Reimann and Michael Reiter and Holger Schwarz and Dimka Karastoyanova and Frank Leymann},
title = {{SIMPL - A Framework for Accessing External Data in Simulation Workflows}},
booktitle = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2011), 14. Fachtagung des GI-Fachbereichs ``Datenbanken und Informationssysteme'' (DBIS), Proceedings, 02.-04. M{\"a}rz 2011, Kaiserslautern, Germany},
editor = {Gesellschaft f{\"u}r Informatik (GI)},
publisher = {Lecture Notes in Informatics (LNI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Series of the Gesellschaft f{\"u}r Informatik (GI)},
volume = {180},
pages = {534--553},
type = {Conference Paper},
month = {March},
year = {2011},
isbn = {978-3-88579-274-1},
keywords = {Data Provisioning; Workflow; Scientific Workflow; Simulation Workflow; BPEL; WS-BPEL; SIMPL},
language = {English},
cr-category = {H.2.8 Database Applications,
H.4.1 Office Automation},
contact = {Peter Reimann Peter.Reimann@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {Adequate data management and data provisioning are among the most important
topics to cope with the information explosion intrinsically associated with
simulation applications. Today, data exchange with and between simulation
applications is mainly accomplished in a file-style manner. These files show
proprietary formats and have to be transformed according to the specific needs
of simulation applications. Lots of effort has to be spent to find appropriate
data sources and to specify and implement data transformations. In this paper,
we present SIMPL -- an extensible framework that provides a generic and
consolidated abstraction for data management and data provisioning in
simulation workflows. We introduce extensions to workflow languages and show
how they are used to model the data provisioning for simulation workflows based
on data management patterns. Furthermore, we show how the framework supports a
uniform access to arbitrary external data in such workflows. This removes the
burden from engineers and scientists to specify low-level details of data
management for their simulation applications and thus boosts their
productivity.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-07&engl=1}
}
@inproceedings {INPROC-2011-06,
author = {Christoph Stach},
title = {{Saving time, money and the environment - vHike a dynamic ride-sharing service for mobile devices}},
booktitle = {Work in Progress workshop at PerCom 2011 (WIP of PerCom 2011)},
address = {Seattle, USA},
publisher = {IEEE},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {1--4},
type = {Conference Paper},
month = {March},
year = {2011},
keywords = {ride-sharing; trust; security; location-based},
language = {English},
cr-category = {K.4 Computers and Society},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In times of increasing traffic-related problems, such as air-pollution or
traffic jams, ride-sharing is one of the most environmentally friendly and
pleasantest ways to travel. The many benefits are offset by a multitude of
prejudices and fears, including security concerns and a heavy scheduling and
coordinating burden. For this reason this paper introduces vHike an easy-to-use
management system for dynamic ridesharing running on modern Smartphones. By the
use of techniques well-known from Web 2.0 social networks the threats and
social discomfort emanated by ride-sharing is mitigated. With vHike we want to
show that a proper designed social dynamic ride-sharing system can be feasible
and viable.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-06&engl=1}
}
@inproceedings {INPROC-2010-89,
author = {Florian Niedermann and Sylvia Radesch{\"u}tz and Bernhard Mitschang},
title = {{Deep Business Optimization: A Platform for Automated Process Optimization}},
booktitle = {Business Process and Service Science - Proceedings of ISSS and BPSC: BPSC'10; Leipzig, Germany, September 27th - October 1st, 2010},
editor = {Witold Abramowicz and Rainer Alt and Klaus-Peter F{\"a}hnrich and Bogdan Franczyk and Leszek A Maciaszek},
publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Informatics},
volume = {P177},
pages = {168--180},
type = {Conference Paper},
month = {September},
year = {2010},
isbn = {978-3-88579-271-0},
language = {English},
cr-category = {H.4.1 Office Automation,
H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The efficient and effective design, execution and adaption of its core
processes is vital for the success of most businesses and a major source of
competitive advantage. Despite this critical importance, process optimization
today largely depends on manual analytics and the ability of business analysts
to spot the ``right'' designs and areas of improvement.
This is because current techniques typically fall short in three areas: They
fail to integrate relevant data sources, they do not provide optimal analytical
procedures and they leave it up to the analyst to identify the best process
design.
Hence, we propose in this paper a platform that enables (semi-)automated
process optimization during the process design, execution and analysis stage,
based on insights from specialized analytical procedures running on an
integrated warehouse containing both process and operational data. We further
detail the analysis stage, as it provides the foundation for all other
optimization stages.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-89&engl=1}
}
@inproceedings {INPROC-2010-87,
author = {Carlos L{\"u}bbe},
title = {{Nexus VIS: Ein verteiltes Visualisierungstoolkit f{\"u}r mobile Anwendungen}},
booktitle = {6. GI/ITG KuVS Fachgespr{\"a}ch ``Ortsbezogene Anwendungen und Dienste''},
editor = {Alexander Zipf and Sandra Lanig and Michael Bauer},
address = {Heidelberg},
publisher = {Selbstverlag des Geographischen Instituts der Universit{\"a}t Heidelberg},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
series = {Heidelberger Geographische Bausteine},
volume = {18},
pages = {109--118},
type = {Workshop Paper},
month = {September},
year = {2010},
isbn = {978-3-88570-818-6},
language = {German},
cr-category = {H.2.4 Database Management Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Mobile Anwendungen visualisieren h{\"a}ufig Daten der n{\"a}heren Umgebung, die
entweder als Fakten in ortsbasierten Datenbanken eingetragen sind, oder von
Sensoren kontinuierlich geliefert werden. Dies erfordert sowohl Funktionalit{\"a}t
aus der Daten- und Datenstromverarbeitung als auch spezifische
Visualisierungsfunktionalit{\"a}t, welche weder Visualisierungswerkzeuge noch
Datenstromverarbeitungssysteme in kombinierter Form anbieten. Bei der
Ausf{\"u}hrung von Visualisierungsprozessen in einem heterogenen, verteilten Umfeld
m{\"u}ssen Ger{\"a}te verschiedenster Eigenschaften ber{\"u}cksichtigt werden.
Beispielsweise k{\"o}nnten resourcenbeschr{\"a}nkte Ger{\"a}te eine serverseitige
Verarbeitung von ressourcenintensiven Aufgaben erfordern, w{\"a}hrend
leistungsst{\"a}rkere Ger{\"a}te diese Aufgaben lokal erledigen k{\"o}nnten. Andererseits
k{\"o}nnte eine bestimmte Aufgabe, wie zum Beispiel die Erzeugung eines Bildes aus
einem r{\"a}umlichen Modell, f{\"u}r die Ausf{\"u}hrung auf einem Ger{\"a}t eine gewisse
Hardwareaustattung voraussetzen, wie etwa eine Graphics Processing Unit (GPU).
Um die Eigenschaften der verwendeten Ger{\"a}te optimal auszunutzen, ist eine
flexible, verteilte Verarbeitungsarchitektur w{\"u}nschenswert.
In dieser Arbeit pr{\"a}sentieren wir Nexus VIS, ein flexibles
Visualisierungstoolkit f{\"u}r kontextbasierte Anwendungen. Durch Erweiterung des
Datenstromverarbeitungssystems NexusDS um spezifische Visualisierungsoperatoren
erm{\"o}glichen wir es den Anwendungen, den Visualisierungsprozess als
kontinuierliche Anfrage auszuf{\"u}hren und somit Datenstromverarbeitung und
Visualisierung zu kombinieren. Dar{\"u}ber hinaus er{\"o}ffnet NexusDS als verteiltes
Datenstromverarbeitungssystem eine Vielzahl von m{\"o}glichen Ausf{\"u}hrungsszenarien,
bei denen die unterschiedlichen Eigenschaften der Ger{\"a}te ber{\"u}cksichtigt werden.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-87&engl=1}
}
@inproceedings {INPROC-2010-83,
author = {David Baureis and Donald Neumann and Jorge Minguez},
title = {{From a Product to a Product Service Supply Chain: A strategic Roadmap}},
booktitle = {Proceedings of the 12th International MITIP Conference: The Modern Information Technology in the Innovation Processes of the Industrial Enterprises, Aalborg, Denmark, August 29-31, 2010,},
publisher = {MITIP},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {148--156},
type = {Conference Paper},
month = {August},
year = {2010},
language = {English},
cr-category = {A.0 General Literature, General},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Product-service systems (PSS) have been claimed as an interesting strategy for
reaching sustainable differentiation. Migrating from a manufacturing company to
a solution provider through PSS offerings raises different challenges,
especially for the supply chain. This paper describes a road-map that helps a
manufacturing organization to: 1. evaluate a PSS as a strategic choice; 2.
define the service part of the product-service bundle and; 3. delineate the
most important supply chain design variables.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-83&engl=1}
}
@inproceedings {INPROC-2010-82,
author = {Christoph Stach},
title = {{Gamework - A customizable framework for pervasive games (Doctoral Colloquium)}},
booktitle = {ICPS '10: Proceedings of the 7th international conference on Pervasive services; Berlin, Germany, July 13 - 15, 2010},
editor = {Christian Becker and Shonali Krishnaswamy and Gergely Zaruba and Dipanjan Chakraborty and Mika Luimula and Jadwiga Indulska and Mohamed Medhat Gaber and Seng Wai Loke and Agustinus Borgy Waluyo},
publisher = {ACM Press},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {ACM International Conference Proceeding Series},
pages = {168--173},
type = {Conference Paper},
month = {July},
year = {2010},
isbn = {978-1-4503-0249-4},
keywords = {Mobile services, pervasive multi-player games; customizable framework},
language = {English},
cr-category = {D.2.13 Software Engineering Reusable Software,
H.2.8 Database Applications,
K.8 Personal Computing},
contact = {Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Location-based applications and services gain in importance as the spread of
mobile devices equipped with GPS sensors and internet capabilities increases.
Moreover, there is a trend towards customizable applications in order to let
the user determine e.g. the content and the look and feel. Nevertheless, there
exist only a few applications combining these features.
In this paper, we propose Gamework, a framework for customizable mobile
location-based games as a specialization for customizable mobile context-aware
applications and services. According to their programming skills players are
able to adapt a game with Gamework. This can be done by changing the context,
adding user-generated content, modifying the game-flow or implementing new
games from scratch by reusing existing modules of the framework or adding new
ones. Therefore our framework features a reuse-oriented development methodology
as well as a feedback loop analyzing all accruing user-generated content. The
results of this analysis are used to automatically optimize the game with
respect to frequent user-feedback. Finally, we will transfer the results back
to the more general area of mobile and context-aware applications and services.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-82&engl=1}
}
@inproceedings {INPROC-2010-79,
author = {Stefan Silcher and Jorge Minguez and Thorsten Scheibler and Bernhard Mitschang},
title = {{A Service-Based Approach for Next-Generation Product Lifecycle Management}},
booktitle = {Proceedings of the 11th IEEE International Conference on Information Reuse and Integration (IEEE IRI 2010) in Las Vegas, Nevada, USA.},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {219--224},
type = {Conference Paper},
month = {August},
year = {2010},
isbn = {978-1-4244-8098-2},
keywords = {Product Lifecycle Management; PLM; Service Oriented Architecture; SOA; Enterprise Service Bus; ESB},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
D.2.13 Software Engineering Reusable Software},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Nowadays, one of the main challenges for companies is the effective management
of IT-systems. In times where requirements and companies change steadily, the
IT infrastructure has to adopt these changes as well: new systems have to be
integrated or existing adapted. Even worse, these systems work together to
support business processes of a company and, thus, the infrastructures becomes
complex and difficult to manage.
The same situation is true for Product Lifecycle Management (PLM) that
accompanies a product development by means of interconnected IT systems running
on complex IT infrastructures. This paper introduces a viable solution to the
integration of all phases of PLM. An Enterprise Service Bus (ESB) is employed
as the service-based integration and communication infrastructure. Three
exemplary scenarios are introduced to describe the benefits of using an ESB as
compared to alternative PLM infrastructures. Furthermore, we introduce a
service hierarchy to enable value-added services to enhance PLM functionality.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-79&engl=1}
}
@inproceedings {INPROC-2010-45,
author = {Jorge Minguez and Dominik Lucke and Mihaly Jakob and Carmen Constantinescu and Bernhard Mitschang},
title = {{Introducing SOA into Production Environments - The Manufacturing Service Bus}},
booktitle = {Proceedings of the 43rd. CIRP International Conference on Manufacturing Systems},
address = {Vienna, Graz, Austria},
publisher = {Neuer Wissenschaftlicher Verlag},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1117--1124},
type = {Conference Paper},
month = {May},
year = {2010},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
D.2.13 Software Engineering Reusable Software},
contact = {jorge.minguez@gsame.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Volatile markets and constantly changing business conditions force
manufacturing enterprises to continuously adapt their digital factory
information systems. In many industries there is still no backbone for the
integration of factory information systems. Current integration is based on
point-to-point interfaces, which is partially due to the cost of replacing
their established legacy systems. Furthermore the lack of flexibility prevents
business processes to improve their responsiveness and adapt workflows of
manufacturing to different turbulences. The principles of Service-Oriented
Architecture (SOA) and its associated technologies (e. g., Enterprise Service
Bus) and standards are key enabler and driver of the required flexibility. The
presented Manufacturing Service Bus is a SOA-based approach that extends the
Enterprise Service Bus capabilities in three areas: event management, factory
context semantics and change propagation workflows. The Manufacturing Service
Bus provides an event-driven platform for flexible integration of digital
factory applications.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-45&engl=1}
}
@inproceedings {INPROC-2010-43,
author = {Stamatia Rizou and Kai H{\"a}ussermann and Frank D{\"u}rr and Nazario Cipriani and Kurt Rothermel},
title = {{A system for distributed context reasoning.}},
booktitle = {Proceedings of ICAS'10: International Conference on Autonomic and Autonomous Systems},
publisher = {IEEE Computer Society Conference Publishing Services},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {84--89},
type = {Conference Paper},
month = {March},
year = {2010},
isbn = {978-0-7695-3970-6},
doi = {10.1109/ICAS.2010.21},
language = {English},
cr-category = {C.2.4 Distributed Systems},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2010-43/INPROC-2010-43.pdf,
http://dx.doi.org/10.1109/ICAS.2010.21},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Context aware systems use context information to adapt their behaviour
accordingly. In order to derive high level context information from low level
context, such as sensor values, context reasoning methods that correlate
observable context information, are necessary. Several context reasoning
mechanisms have been proposed in the literature. Usually these mechanisms are
centralized, leading to suboptimal utilization of network resources and poor
system performance in case of large-scale scenarios. Therefore to increase the
scalability of context reasoning systems the development of methods that
distribute the reasoning process is necessary. Existing distributed approaches
are method specific and do not provide a generic formalization for distributed
reasoning. In this paper we introduce a novel system which enables distributed
context reasoning in a generic way that is independent of the reasoning
algorithm.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-43&engl=1}
}
@inproceedings {INPROC-2010-37,
author = {Benjamin Leonhardi and Bernhard Mitschang and Rub{\'e}n Pulido and Christoph Sieb and Michael Wurst},
title = {{Augmenting OLAP exploration with dynamic advanced analytics.}},
booktitle = {Proceedings of the 13th International Conference on Extending Database Technology (EDBT 2010),Lausanne, Switzerland,March 22-26,2010},
address = {New York, NY, USA},
publisher = {ACM},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {687--692},
type = {Conference Paper},
month = {April},
year = {2010},
isbn = {978-1-60558-945-9},
language = {English},
cr-category = {H.2.4 Database Management Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Online Analytical Processing (OLAP) is a popular technique for explorative data
analysis. Usually, a fixed set of dimensions (such as time, place, etc.) is
used to explore and analyze various subsets of a given, multi-dimensional data
set. These subsets are selected by constraining one or several of the
dimensions, for instance, showing sales only in a given year and geographical
location. Still, such aggregates are often not enough. Important information
can only be discovered by combining several dimensions in a multidimensional
analysis. Most existing approaches allow to add new dimensions either
statically or dynamically. These approaches support, however, only the creation
of global dimensions that are not interactive for the user running the report.
Furthermore, they are mostly restricted to data clustering and the resulting
dimensions cannot be interactively refined.
In this paper we propose a technique and an architectural solution that is
based on an interaction concept for creating OLAP dimensions on subsets of the
data dynamically, triggered interactively by the user, based on arbitrary
multi-dimensional grouping mechanisms. This approach allows combining the
advantages of both, OLAP exploration and interactive multidimensional analysis.
We demonstrate the industry-strength of our solution architecture using a setup
of IBM{\textregistered} InfoSphere{\texttrademark} Warehouse data mining and Cognos{\textregistered} BI as reporting engine.
Use cases and industrial experiences are presented showing how insight derived
from data mining can be transparently presented in the reporting front end, and
how data mining algorithms can be invoked from the front end, achieving
closed-loop integration.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-37&engl=1}
}
@inproceedings {INPROC-2010-20,
author = {Christoph Stach},
title = {{Gamework - A customizable framework for pervasive games}},
booktitle = {IADIS International Conference Game and Entertainment Technologies (GET '10)},
publisher = {IADIS Press},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {1--8},
type = {Conference Paper},
month = {July},
year = {2010},
keywords = {Pervasive, mobile, customizable, games, framework, smartphones},
language = {English},
cr-category = {D.3.3 Programming Language Constructs and Features,
D.2.13 Software Engineering Reusable Software,
K.8 Personal Computing},
contact = {Christoph Stach christoph.stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The number of pervasive games is growing in which players have to interact with
their environment in order to control their avatar. While in the past they
required immense hardware equipment, nowadays, smartphones have all required
functionality built-in already. In addition, there is an upcoming trend towards
software, supported with new content and knowledge by an active community. By
combining these two trends, a new genre of computer games arises, targeting not
only established gamers but also new audiences.
We build four customization classes differing in required player's programming
knowledge, support by the games and scale of adaption potential. With Gamework
we present a framework for pervasive games on modern smartphones simplifying
user-driven game customization for the identified customization methods in this
paper. We also present two games implemented with this framework. Concluding,
we show the potential of these games as well as the potential of our framework.
We also report on early experiences in exploiting the customization approach of
our framework.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-20&engl=1}
}
@inproceedings {INPROC-2010-19,
author = {Carlos L{\"u}bbe and Andreas Brodt and Nazario Cipriani and Harald Sanftmann},
title = {{NexusVIS: A Distributed Visualization Toolkit for Mobile Applications (Demonstration)}},
booktitle = {Proceedings of the 8th Annual IEEE International Conference on Pervasive Computing and Communications (PerCom '10); Mannheim, Germany, March 2010},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {1--3},
type = {Conference Paper},
month = {March},
year = {2010},
isbn = {978-1-4244-5328-3},
keywords = {Visualisierung, Datenstromverarbeitung},
language = {English},
cr-category = {H.2.4 Database Management Systems},
contact = {carlos.luebbe@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Visualisation and Interactive Systems, Visualisation and Interactive Systems},
abstract = {Many mobile pervasive applications need to visualize information about the
user's geographic surroundings combined with data from sensors, which determine
the user's context. We demonstrate NexusVIS, a distributed visualization
toolkit for mobile applications. By building upon an existing data stream
processing system we enable applications to define distributed visualization
processes as continuous queries. This allows applications to define
visualization semantics descriptively. Moreover, NexusVIS is capable of
adapting the visual query at runtime, and thus allows to navigate in the
visualized scene both automatically and manually through user control.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-19&engl=1}
}
@inproceedings {INPROC-2010-122,
author = {Nicola H{\"o}nle and Matthias Gro{\ss}mann and Steffen Reimann and Bernhard Mitschang},
title = {{Usability analysis of compression algorithms for position data streams}},
booktitle = {18th ACM SIGSPATIAL International Symposium on Advances in Geographic Information Systems, ACM-GIS 2010, November 3-5, 2010, San Jose, CA, USA, Proceedings},
editor = {Agrawal Divyakant and Pusheng Zhang and Amr El Abbadi and Mohamed F. Mokbel},
publisher = {ACM},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {240--249},
type = {Conference Paper},
month = {November},
year = {2010},
keywords = {trajectory compression; sensor data stream},
language = {English},
cr-category = {H.2.8 Database Applications,
C.2.4 Distributed Systems,
F.2.2 Nonnumerical Algorithms and Problems,
G.1.2 Numerical Analysis Approximation},
contact = {nicola.hoenle@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {With the increasing use of sensor technology, the compression of sensor data
streams is getting more and more important to reduce both the costs of further
processing as well as the data volume for persistent storage. A popular method
for sensor data compression is to smooth the original measurement curve by an
approximated curve, which is bounded by a given maximum error value.
Measurement values from positioning systems like GPS are an interesting special
case, because they consist of two spatial and one temporal dimension. Therefore
various standard techniques for approximation calculations like regression or
line simplification algorithms cannot be directly applied.
In this paper, we portray our stream data management system NexusDS and an
operator for compressing sensor data. For the operator, we implemented various
compression algorithms for position data streams. We present the required
adaptations and the different characteristics of the compression algorithms as
well as the results of our evaluation experiments, and compare them with a map
matching approach, specifically developed for position data.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-122&engl=1}
}
@inproceedings{INPROC-2010-121,
  author        = {Jorge Minguez and Frank Ruthardt and Philipp Riffelmacher and Thorsten Scheibler and Bernhard Mitschang},
  title         = {{Service-based Integration in Event-driven Manufacturing Environments}},
  booktitle     = {WISE 2010 Workshops},
  series        = {Lecture Notes in Computer Science},
  volume        = {6724},
  publisher     = {Springer},
  institution   = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages         = {0--14},
  type          = {Conference Paper},
  month         = {December},
  year          = {2010},
  keywords      = {Manufacturing; Service-oriented Computing; Service-oriented Architecture},
  language      = {English},
  cr-category   = {D.2.11 Software Engineering Software Architectures,
                   D.2.13 Software Engineering Reusable Software},
  department    = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract      = {Constantly changing business conditions require a high level of flexibility in
                   business processes as well as an adaptive and fully interoperable IT
                   infrastructure in today’s manufacturing environments. The lack of flexibility
                   prevents manufacturing companies to improve their responsiveness and to adapt
                   their workflows to turbulent scenarios. In order to achieve highly flexible and
                   adaptive workflows, information systems in digital factories and shop floors
                   need to be integrated. The most challenging problem in such manufacturing
                   environments is the high heterogeneity of the IT landscape, where the
                   integration of legacy systems and information silos has lead to chaotic
                   architectures over the last two decades. In order to overcome this issue, we
                   present a flexible integration platform that allows a loose coupling of
                   distributed services in event-driven manufacturing environments. Our approach
                   enables a flexible communication between digital factory and shop floor
                   components by introducing a service bus architecture. Our solution integrates
                   an application-independent canonical message format for manufacturing events,
                   content-based routing and transformation services as well as event processing
                   workflows.},
  internal-note = {NOTE(review): pages 0--14 looks implausible for an LNCS-volume paper -- confirm against the published volume},
  url           = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-121&engl=1}
}
@inproceedings{INPROC-2010-120,
  author      = {Andreas Brodt and Alexander Wobser and Bernhard Mitschang},
  title       = {{Resource Discovery Protocols for Bluetooth-Based Ad-hoc Smart Spaces: Architectural Considerations and Protocol Evaluation}},
  booktitle   = {Proceedings of the 2010 Eleventh International Conference on Mobile Data Management},
  address     = {Washington, DC, USA},
  publisher   = {IEEE Computer Society},
  institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
  pages       = {145--150},
  type        = {Conference Paper},
  month       = {May},
  year        = {2010},
  isbn        = {978-0-7695-4048-1},
  keywords    = {Bluetooth; ad-hoc network; scatternet; protocol; evaluation; ad-hoc smart spaces; resource discovery; mobile device},
  language    = {English},
  cr-category = {C.2.1 Network Architecture and Design,
                 C.2.2 Network Protocols},
  ee          = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2010-120/INPROC-2010-120.pdf},
  contact     = {brodt@ipvs.uni-stuttgart.de},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Ad-hoc smart spaces aim at resource-rich mobile devices sharing resource and
                 context data with others near them spontaneously. Thus, a device may, e.g.,
                 obtain a more complete context model by utilizing sensor data of its neighbors
                 via wireless communication, such as Bluetooth. The highly dynamic device
                 neighborhood challenges resource discovery, as the devices have to organize
                 themselves autonomously.
                 This paper evaluates different resource discovery protocols in Bluetooth-based
                 ad-hoc smart spaces. We simulate the protocols in different scenarios taking
                 into account the scatternet structure of the network. We suggest request
                 flooding for small settings and random replication for medium to large spaces.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-120&engl=1}
}
@inproceedings{INPROC-2010-119,
  author      = {Andreas Brodt and Daniela Nicklas and Bernhard Mitschang},
  title       = {{Deep integration of spatial query processing into native RDF triple stores}},
  booktitle   = {Proceedings of the 18th SIGSPATIAL International Conference on Advances in Geographic Information Systems},
  address     = {New York, NY, USA},
  publisher   = {ACM Press},
  institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
  pages       = {33--42},
  type        = {Conference Paper},
  month       = {November},
  year        = {2010},
  isbn        = {978-1-4503-0428-3},
  keywords    = {GIS; RDF; SPARQL; spatial database; triple store},
  language    = {English},
  cr-category = {H.2.8 Database Applications},
  ee          = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2010-119/INPROC-2010-119.pdf},
  contact     = {brodt@ipvs.uni-stuttgart.de},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Semantic Web technologies, most notably RDF, are well-suited to cope with
                 typical challenges in spatial data management including analyzing complex
                 relations between entities, integrating heterogeneous data sources and
                 exploiting poorly structured data, e.g., from web communities. Also, RDF can
                 easily represent spatial relationships, as long as the location information is
                 symbolic, i.e., represented by places that have a name. What is widely missing
                 is support for geographic and geometric information, such as coordinates or
                 spatial polygons, which is needed in many applications that deal with sensor
                 data or map data. This calls for efficient data management systems which are
                 capable of querying large amounts of RDF data and support spatial query
                 predicates. We present a native RDF triple store implementation with deeply
                 integrated spatial query functionality. We model spatial features in RDF as
                 literals of a complex geometry type and express spatial predicates as SPARQL
                 filter functions on this type. This makes it possible to use W3C's standardized
                 SPARQL query language as-is, i.e., without any modifications or extensions for
                 spatial queries. We evaluate the characteristics of our system on very large
                 data volumes.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-119&engl=1}
}
@inproceedings{INPROC-2010-08,
  author      = {Nazario Cipriani and Carlos L{\"u}bbe and Alexander Moosbrugger},
  title       = {{Exploiting Constraints to Build a Flexible and Extensible Data Stream Processing Middleware}},
  booktitle   = {The Third International Workshop on Scalable Stream Processing Systems},
  publisher   = {IEEE Computer Society},
  institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
  pages       = {1--8},
  type        = {Workshop Paper},
  month       = {April},
  year        = {2010},
  keywords    = {Data Stream Processing; Stream Databases; Middleware Platforms for Data Management; P2P and Networked Data Management; Database Services and Applications},
  language    = {English},
  cr-category = {C.2.4 Distributed Systems,
                 H.2.4 Database Management Systems},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {A wide range of real-time applications process stream-based data. To process
                 this stream-based data in an application-independent manner, many stream
                 processing systems have been built. However, none of them reached a huge domain
                 of applications, such as databases did. This is due to the fact that they do
                 not consider the specific needs of real-time applications. For instance, an
                 application which visualizes stream-based data has stringent timing
                 constraints, or may even need a specific hardware environment to smoothly
                 process the data. Furthermore, users may even add additional constraints. E.g.,
                 for security reasons they may want to restrict the set of nodes that
                 participates in processing. Thus, constraints naturally arise on different
                 levels of query processing.
                 In this work we classify constraints that occur on different levels of query
                 processing. Furthermore we propose a scheme to classify the constraints and
                 show how these can be integrated into the query processing of the distributed
                 data stream middleware NexusDS.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-08&engl=1}
}
@inproceedings{INPROC-2010-05,
  author      = {Sylvia Radesch{\"u}tz and Florian Niedermann and Wolfgang Bischoff},
  title       = {{BIAEditor - Matching Process and Operational Data for a Business Impact Analysis}},
  booktitle   = {Proc. of the 13th International Conference on Extending Database Technology (EDBT 2010), Switzerland, March 22-26, 2010},
  publisher   = {ACM},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages       = {1--4},
  type        = {Conference Paper},
  month       = {March},
  year        = {2010},
  language    = {English},
  cr-category = {D.2.2 Software Engineering Design Tools and Techniques,
                 D.2.12 Software Engineering Interoperability,
                 H.4.1 Office Automation,
                 H.5.2 Information Interfaces and Presentation User Interfaces},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {A profound analysis of all relevant business data in the company is necessary
                 for optimizing business processes effectively. Current analyses typically
                 exclusively run on business process execution data or on operational business
                 data stored in a data warehouse. However, to achieve a more informative
                 analysis and to fully optimize a company’s business, a consolidation of all
                 major business data sources is indispensable. Recent matching algorithms are
                 insufficient for this task, since they are restricted either to schema or to
                 process matching. Our demonstration presents BIAEditor that allows to annotate
                 and match process variables and operational data models in order to perform
                 such a global business impact analysis.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2010-05&engl=1}
}
@inproceedings{INPROC-2009-94,
  author      = {Nazario Cipriani and Mike Eissele and Andreas Brodt and Matthias Gro{\ss}mann and Bernhard Mitschang},
  title       = {{NexusDS: A Flexible and Extensible Middleware for Distributed Stream Processing}},
  booktitle   = {IDEAS '09: Proceedings of the 2009 International Symposium on Database Engineering \& Applications},
  editor      = {{ACM}},
  publisher   = {ACM},
  institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
  pages       = {152--161},
  type        = {Conference Paper},
  month       = {September},
  year        = {2009},
  keywords    = {Data Stream Processing; Stream Databases; Middleware Platforms for Data Management; P2P and Networked Data Management; Database Services and Applications},
  language    = {English},
  cr-category = {C.2 Computer-Communication Networks,
                 C.5 Computer System Implementation,
                 H.2 Database Management},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Techniques for efficient and distributed processing of huge, unbound data
                 streams have made some impact in the database community. Sensors and data
                 sources, such as position data of moving objects, continuously produce data
                 that is consumed, e.g., by location-aware applications. Depending on the domain
                 of interest, e.g. visualization, the processing of such data often depends on
                 domain-specific functionality. This functionality is specified in terms of
                 dedicated operators that may require specialized hardware, e.g. GPUs. This
                 creates a strong dependency which a data stream processing system must consider
                 when deploying such operators. Many data stream processing systems have been
                 presented so far. However, these systems assume homogeneous computing nodes, do
                 not consider operator deployment constraints, and are not designed to address
                 domain-specific needs.
                 In this paper, we identify necessary features that a flexible and extensible
                 middleware for distributed stream processing of context data must satisfy. We
                 present NexusDS, our approach to achieve these requirements. In NexusDS, data
                 processing is specified by orchestrating data flow graphs, which are modeled as
                 processing pipelines of predefined and general operators as well as
                 custom-built and domain-specific ones. We focus on easy extensibility and
                 support for domain-specific operators and services that may even utilize
                 specific hardware available on dedicated computing nodes.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-94&engl=1}
}
@inproceedings{INPROC-2009-86,
  author      = {Matthias Gro{\ss}mann and Nicola H{\"o}nle and Carlos L{\"u}bbe and Harald Weinschrott},
  title       = {{An Abstract Processing Model for the Quality of Context Data}},
  booktitle   = {Proceedings of the 1st International Workshop on Quality of Context},
  publisher   = {Springer},
  institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
  pages       = {132--143},
  type        = {Workshop Paper},
  month       = {June},
  year        = {2009},
  keywords    = {uncertainty; inconsistency; trust; processing model},
  language    = {English},
  cr-category = {H.2.8 Database Applications},
  ee          = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-86/INPROC-2009-86.pdf,
                 http://dx.doi.org/10.1007/978-3-642-04559-2_12},
  contact     = {Matthias.Grossmann@ipvs.uni-stuttgart.de},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems;
                 University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Data quality can be relevant to many applications. Especially applications
                 coping with sensor data cannot take a single sensor value for granted. Because
                 of technical and physical restrictions each sensor reading is associated with
                 an uncertainty. To improve quality, an application can combine data values from
                 different sensors or, more generally, data providers. But as different data
                 providers may have diverse opinions about a certain real world phenomenon,
                 another issue arises: inconsistency. When handling data from different data
                 providers, the application needs to consider their trustworthiness. This
                 naturally introduces a third aspect of quality: trust. In this paper we propose
                 a novel processing model integrating the three aspects of quality: uncertainty,
                 inconsistency and trust.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-86&engl=1}
}
@inproceedings{INPROC-2009-68,
  author      = {Sylvia Radesch{\"u}tz and Bernhard Mitschang},
  title       = {{Extended Analysis Techniques For a Comprehensive Business Process Optimization}},
  booktitle   = {Proc. of the International Conference on Knowledge Management and Information Sharing (KMIS 2009), Portugal, 6.-8. Oktober, 2009.},
  publisher   = {Springer},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages       = {1--6},
  type        = {Conference Paper},
  month       = {October},
  year        = {2009},
  language    = {English},
  cr-category = {H.2.7 Database Administration,
                 H.2.8 Database Applications},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Efficient adaption of a company’s business and its business processes to a
                 changing environment is a crucial ability to survive in today’s dynamic world.
                 For optimizing business processes, a profound analysis of all relevant business
                 data in the company is necessary. We define an extended data warehouse approach
                 that integrates process-related data and operational business data. This
                 extended data warehouse is used as the underlying data source for extended OLAP
                 and data mining analysis techniques for a comprehensive business process
                 optimization.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-68&engl=1}
}
@inproceedings{INPROC-2009-60,
  author        = {Andreas Brodt and Christoph Stach},
  title         = {{Mobile ortsbasierte Browserspiele}},
  booktitle     = {Tagungsband der 39. GI-Jahrestagung, 28.9. - 2.10.2009, Universit{\"a}t zu L{\"u}beck},
  editor        = {Gesellschaft f{\"u}r Informatik e.V.},
  publisher     = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
  institution   = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
  series        = {Lecture Notes in Informatics},
  type          = {Workshop Paper},
  month         = {September},
  year          = {2009},
  keywords      = {Pervasive Games; GPS; location-based games; browser games},
  language      = {German},
  cr-category   = {H.5.1 Multimedia Information Systems,
                   H.5.2 Information Interfaces and Presentation User Interfaces,
                   K.8 Personal Computing},
  ee            = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-60/INPROC-2009-60.pdf},
  contact       = {Andreas Brodt andreas.brodt@ipvs.uni-stuttgart.de},
  department    = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract      = {Die Verbreitung von mobilen Ger{\"a}ten und Positionssensoren wie GPS erm{\"o}glicht
                   mobile ortsbasierte Spiele, in denen sich die Spieler physisch in ihrer Umwelt
                   bewegen m{\"u}ssen, um Einfluss auf das Spielgeschehen zu nehmen. Klassische
                   Computerspiele werden zunehmend als Browserspiel realisiert, d.h. der
                   Web-Browser des Spielers wird f{\"u}r die Benutzerschnittstelle verwendet. Indem
                   der Web-Browser um eine Kontextschnittstelle erweitert wird, kann einem
                   Browserspiel Zugriff auf die aktuelle Position des Spielers gew{\"a}hrt werden.
                   Dadurch wird es m{\"o}glich, mobile ortsbasierte Spiele im Web-Browser zu spielen.
                   In diesem Papier besch{\"a}ftigen wir uns mit dem Eigenschaften mobiler
                   ortsbasierter Browserspiele. Wir stellen zwei Beispiele vor, anhand derer wir
                   untersuchen, welche Einfl{\"u}sse mobile ortsbasierte Spiele auf das Spielkonzept
                   haben und welche technischen Konsequenzen sich daraus ergeben. Abschlie{\ss}end
                   pr{\"a}sentieren wir ein Framework zur Entwicklung mobiler ortsbasierter Spiele.},
  internal-note = {NOTE(review): entry has a series but no volume and no pages -- presumably obtainable from the LNI volume; verify},
  url           = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-60&engl=1}
}
@inproceedings{INPROC-2009-53,
  author      = {Nazario Cipriani and Matthias Wieland and Matthias Gro{\ss}mann and Daniela Nicklas},
  title       = {{Tool Support for the Design and Management of Spatial Context Models}},
  booktitle   = {Proc. of the 13th East European Conference on Advances in Databases and Information Systems (ADBIS 2009)},
  address     = {Riga, Latvia},
  publisher   = {Springer},
  institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
  type        = {Conference Paper},
  month       = {September},
  year        = {2009},
  language    = {English},
  cr-category = {H.2.4 Database Management Systems},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
                 University of Stuttgart, Institute of Architecture of Application Systems},
  abstract    = {A central task in the development of context-aware applications is the modeling
                 and management of complex context information. In this paper, we present the
                 NexusEditor, which eases this task by providing a graphical user interface to
                 design schemas for spatial context models, interactively create queries, send
                 them to a server and visualize the results. One main contribution is to show
                 how schema awareness can improve such a tool: the NexusEditor dynamically
                 parses the underlying data model and provides additional syntactic checks,
                 semantic checks, and short-cuts based on the schema information. Furthermore,
                 the tool helps to design new schema definitions based on the existing ones,
                 which is crucial for an iterative and user-centric development of context-aware
                 applications. Finally, it provides interfaces to existing information spaces
                 and visualization tools for spatial data like GoogleEarth.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-53&engl=1}
}
@inproceedings{INPROC-2009-47,
  author        = {Andreas Brodt and Sailesh Sathish},
  title         = {{Together we are strong -- Towards ad-hoc smart spaces}},
  booktitle     = {Proceedings of the 7th Annual IEEE International Conference on Pervasive Computing and Communications (PerCom '09). Galveston, TX, USA. March 2009},
  publisher     = {IEEE Computer Society},
  institution   = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
  pages         = {1--4},
  type          = {Conference Paper},
  month         = {January},
  year          = {2009},
  keywords      = {ad-hoc; smart space; mobile},
  language      = {English},
  cr-category   = {H.2.8 Database Applications,
                   H.3.4 Information Storage and Retrieval Systems and Software,
                   H.3.5 Online Information Services},
  ee            = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-47/INPROC-2009-47.pdf,
                   http://www.nexus.uni-stuttgart.de/},
  contact       = {andreas.brodt@ipvs.uni-stuttgart.de},
  department    = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract      = {Today's mobile devices feature many resources, sensors and data, which make it
                   possible for them to adapt to their environment. However, they would have a lot
                   more resources at hand, and thus could better adapt to their context, if they
                   used their communication capabilities to share their resources with others near
                   them. In this paper we propose the concept of ad-hoc smart spaces, which
                   enables mobile devices to utilize resources owned by other devices in physical
                   proximity. This imposes a number of challenges as cooperation partners can join
                   and leave any time and as devices are highly heterogeneous. We propose a system
                   architecture for ad-hoc smart spaces and present a prototypic implementation.},
  internal-note = {NOTE(review): month January conflicts with "March 2009" in the booktitle -- confirm which is correct},
  url           = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-47&engl=1}
}
@inproceedings{INPROC-2009-46,
  author      = {Eva Fenrich and Andreas Brodt and Daniela Nicklas},
  title       = {{WODCA: A Mobile, Web-Based Field Sampling Support System}},
  booktitle   = {Proceedings of the 8th international conference on Hydroinformatics, HEIC 2009},
  publisher   = {International Water Association},
  institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
  pages       = {1--10},
  type        = {Conference Paper},
  month       = {January},
  year        = {2009},
  keywords    = {GPS; DCCI; Field sampling},
  language    = {English},
  cr-category = {H.3.5 Online Information Services},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Efficient data acquisition methods can save time and money in field
                 measurements. As most field data in aquatic systems is time dependent, it is
                 important to minimize sampling errors as measurements for a certain time and
                 place cannot be re-taken. For larger measurement campaigns with different teams
                 at distributed sampling sites, information about data from other teams as well
                 as environmental data like gauge, tidal or upstream weather information could
                 help to interpret measurement results already on site. We propose WODCA (Water
                 Observation Data Collecting Application), a mobile, online measurement support
                 system. The measurements are sent wirelessly to a geographic database that
                 provides back an integrated view on already taken measurements of all mobile
                 parties, augmented with online environmental data like gauge data or tidal
                 information. This eases the collaboration between different mobile measurement
                 teams. GPS integration and map based data input can help to avoid location
                 errors. The proposed system could assist materially in the improvement of field
                 data quality and sampling comfort.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-46&engl=1}
}
@inproceedings{INPROC-2009-45,
  author      = {Andreas Brodt and Nazario Cipriani},
  title       = {{NexusWeb - eine kontextbasierte Webanwendung im World Wide Space}},
  booktitle   = {Datenbanksysteme in Business, Technologie und Web (BTW 2009), 13. Fachtagung des GI-Fachbereichs ``Datenbanken und Informationssysteme'' (DBIS), Proceedings, 2.-6. M{\"a}rz 2009, M{\"u}nster, Germany},
  editor      = {GI},
  publisher   = {GI},
  institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
  series      = {Lecture Notes in Informatics},
  volume      = {144},
  pages       = {588--591},
  type        = {Conference Paper},
  month       = {March},
  year        = {2009},
  isbn        = {978-3-88579-238-3},
  keywords    = {DCCI, location-based services, Nokia N810, Internet Tablet, GPS, Google Maps, Nexus, AWQL, AWML},
  language    = {German},
  cr-category = {H.3.5 Online Information Services},
  ee          = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-45/INPROC-2009-45.pdf},
  contact     = {Andreas Brodt andreas.brodt@ipvs.uni-stuttgart.de},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Wir pr{\"a}sentieren NexusWeb, eine kontextbasierte Webanwendung, die ein
                 Umgebungsmodell des Benutzers zeichnet. NexusWeb bezieht dazu Kontextdaten des
                 World Wide Space, einer F{\"o}deration verschiedenster Datenanbieter. Die
                 GPS-Position des Benutzers gelangt {\"u}ber eine standardisierte
                 Browsererweiterung in die Webanwendung und steuert die Ansicht von NexusWeb.
                 Wir zeigen verschiedene Szenarien sowohl auf einem mobilen Ger{\"a}t als auch auf
                 einem gew{\"o}hnlichen Laptop.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-45&engl=1}
}
@inproceedings{INPROC-2009-29,
  author      = {Fabian Kaiser and Holger Schwarz and Mih{\'a}ly Jakob},
  title       = {{Using Wikipedia-based conceptual contexts to calculate document similarity}},
  booktitle   = {ICDS2009: Proceedings of the 3rd International Conference on Digital Society},
  address     = {Cancun, Mexico},
  publisher   = {IEEE Computer Society},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages       = {322--327},
  type        = {Conference Paper},
  month       = {February},
  year        = {2009},
  language    = {English},
  cr-category = {H.3 Information Storage and Retrieval,
                 H.3.3 Information Search and Retrieval},
  ee          = {http://dx.doi.org/10.1109/ICDS.2009.7},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Rating the similarity of two or more text documents is an essential task in
                 information retrieval. For example, document similarity can be used to rank
                 search engine results, cluster documents according to topics etc. A major
                 challenge in calculating document similarity originates from the fact that two
                 documents can have the same topic or even mean the same, while they use
                 different wording to describe the content. A sophisticated algorithm therefore
                 will not directly operate on the texts but will have to find a more abstract
                 representation that captures the texts' meaning. In this paper, we propose a
                 novel approach for calculating the similarity of text documents. It builds on
                 conceptual contexts that are derived from content and structure of the
                 Wikipedia hypertext corpus.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-29&engl=1}
}
@inproceedings{INPROC-2009-130,
  author        = {Jorge Minguez and Mih{\'a}ly Jakob and Uwe Heinkel and Bernhard Mitschang},
  title         = {{A SOA-based approach for the integration of a data propagation system}},
  booktitle     = {IRI'09: Proceedings of the 10th IEEE international conference on Information Reuse \& Integration},
  publisher     = {IEEE Press},
  institution   = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages         = {47--52},
  type          = {Conference Paper},
  month         = {August},
  year          = {2009},
  isbn          = {978-1-4244-4114-3},
  language      = {English},
  cr-category   = {D.2.11 Software Engineering Software Architectures,
                   D.2.13 Software Engineering Reusable Software},
  department    = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract      = {Major challenges that companies face nowadays are extremely volatile markets, a
                   globally distributed supplier network and constantly changing business
                   environments. These circumstances demand a high level of agility and
                   extraordinary flexibility in the business modeling and the organizational
                   structures of a company as well as adaptive and interoperable IT systems. In
                   order to meet these requirements an integration of systems needs to be
                   achieved.
                   A possible solution for this problem is Champagne, which is a data propagation
                   system that ensures the interoperability of enterprise applications at the data
                   level. However, Champagne provides a tightly-coupled integration of
                   applications and its architecture lacks the needed flexibility to link business
                   processes.
                   These deficiencies can be overcome with the adoption of a service-oriented
                   architecture (SOA), based on loosely-coupled services, which enable a higher
                   level of flexibility and interoperability. Therefore, we explore in this paper
                   a number of options to reuse and integrate Champagne into a service-oriented
                   architecture in order to benefit from SOA principles.},
  internal-note = {NOTE(review): near-duplicate of INPROC-2009-126 (same paper, same pages) -- consider deduplicating},
  url           = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-130&engl=1}
}
@inproceedings{INPROC-2009-126,
  author        = {Jorge M{\'\i}nguez and Mih{\'a}ly Jakob and Uwe Heinkel and Bernhard Mitschang},
  title         = {{A SOA-based approach for the integration of a data propagation system}},
  booktitle     = {IRI'09: Proceedings of the 10th IEEE international conference on Information Reuse \& Integration},
  publisher     = {IEEE Press},
  institution   = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  pages         = {47--52},
  type          = {Conference Paper},
  month         = {August},
  year          = {2009},
  isbn          = {978-1-4244-4114-3},
  language      = {English},
  cr-category   = {D.2.11 Software Engineering Software Architectures,
                   D.2.13 Software Engineering Reusable Software},
  department    = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract      = {Major challenges that companies face nowadays are extremely volatile markets, a
                   globally distributed supplier network and constantly changing business
                   environments. These circumstances demand a high level of agility and
                   extraordinary flexibility in the business modeling and the organizational
                   structures of a company as well as adaptive and interoperable IT systems. In
                   order to meet these requirements an integration of systems needs to be
                   achieved.
                   A possible solution for this problem is Champagne, which is a data propagation
                   system that ensures the interoperability of enterprise applications at the data
                   level. However, Champagne provides a tightly-coupled integration of
                   applications and its architecture lacks the needed flexibility to link business
                   processes.
                   These deficiencies can be overcome with the adoption of a service-oriented
                   architecture (SOA), based on loosely-coupled services, which enable a higher
                   level of flexibility and interoperability. Therefore, we explore in this paper
                   a number of options to reuse and integrate Champagne into a service-oriented
                   architecture in order to benefit from SOA principles.},
  internal-note = {NOTE(review): near-duplicate of INPROC-2009-130 (same paper, same pages) -- consider deduplicating},
  url           = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-126&engl=1}
}
@inproceedings {INPROC-2009-125,
author = {Jorge M{\'\i}nguez and Mih{\'a}ly Jakob and Uwe Heinkel and Bernhard Mitschang},
title = {{A SOA-based approach for the integration of a data propagation system}},
booktitle = {IRI'09: Proceedings of the 10th IEEE international conference on Information Reuse \& Integration},
publisher = {IEEE Press},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {47--52},
type = {Conference Paper},
month = {August},
year = {2009},
isbn = {978-1-4244-4114-3},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
D.2.13 Software Engineering Reusable Software},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Major challenges that companies face nowadays are extremely volatile markets, a
globally distributed supplier network and constantly changing business
environments. These circumstances demand a high level of agility and
extraordinary flexibility in the business modeling and the organizational
structures of a company as well as adaptive and interoperable IT systems. In
order to meet these requirements an integration of systems needs to be
achieved.
A possible solution for this problem is Champagne, which is a data propagation
system that ensures the interoperability of enterprise applications at the data
level. However, Champagne provides a tightly-coupled integration of
applications and its architecture lacks the needed flexibility to link business
processes.
These deficiencies can be overcome with the adoption of a service-oriented
architecture (SOA), based on loosely-coupled services, which enable a higher
level of flexibility and interoperability. Therefore, we explore in this paper
a number of options to reuse and integrate Champagne into a service-oriented
architecture in order to benefit from SOA principles.},
internal-note = {NOTE(review): field-for-field identical to INPROC-2009-126 (same title, pages, ISBN) -- likely duplicate entry; confirm which key is canonical and remove the other},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-125&engl=1}
}
@inproceedings {INPROC-2009-120,
author = {Nicola H{\"o}nle and Matthias Gro{\ss}mann and Dominik Lucke and Engelbert Westk{\"a}mper},
title = {{Historisierung und Analyse von Stromdaten in einem Data Warehouse am Beispiel der Smart Factory}},
booktitle = {Beitr{\"a}ge der 39. Jahrestagung der Gesellschaft f{\"u}r Informatik e.V. (GI)},
editor = {Stefan Fischer and Erik Maehle and R{\"u}diger Reischuk},
address = {Bonn},
publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
series = {Lecture Notes in Informatics},
volume = {P-154},
pages = {246--246},
type = {Workshop Paper},
month = {September},
year = {2009},
isbn = {978-3-88579-248-2},
language = {German},
cr-category = {H.2.8 Database Applications,
J.7 Computers in Other Systems},
department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Industrielle Fertigung und Fabrikbetrieb (IFF);
University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Die heute geforderte schnelle Anpassung von Fabriken an sich st{\"a}ndig {\"a}ndernde
Herausforderungen des Marktes erfordert aktuelle und korrekte Informationen
{\"u}ber den Zustand der Fabrik. Dabei fallen Stromdaten unterschiedlicher Herkunft
an (z.B. Betriebsdaten aus Maschinen oder Sensormesswerte, verkn{\"u}pft mit
virtuellen Informationen wie z.B. Workflows), zu deren Erfassung und
Integration ein System zum Stromdatenmanagement ben{\"o}tigt wird. Dazu setzen wir
ein spezielles Datenstromverarbeitungssystem ein.
Im Teilprojekt ``Smart Factory'' des Sonderforschungsbereichs 627 Nexus werden
Anwendungen f{\"u}r die Produktion erforscht, die eine kontextbezogene
Unterst{\"u}tzung von Mitarbeitern und Maschinen erm{\"o}glichen.
Die anfallenden Daten in der Smart Factory sowohl von Sensoren als auch von
Leitrechnern und Maschinen k{\"o}nnen nicht nur zur Unterst{\"u}tzung aktueller
Aufgaben verwendet werden, sondern aus der Analyse der Datenhistorien lassen
sich auch wertvolle Erkenntnisse gewinnen, die wiederum r{\"u}ckwirkend den
Alltagsbetrieb in der Fabrik verbessern k{\"o}nnen.
In diesem Artikel stellen wir den Entwurf eines Data Warehouses vor, das zur
Speicherung und Analyse von Datenhistorien aus der Smart Factory entworfen
wurde, und wir skizzieren, wie mit Hilfe des verwendeten
Datenstromverarbeitungssystems das kontinuierliche Laden der Daten realisiert
werden kann.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-120&engl=1}
}
@inproceedings {INPROC-2009-119,
author = {Nazario Cipriani and Carlos L{\"u}bbe},
title = {{Ausnutzung von Restriktionen zur Verbesserung des Deployment-Vorgangs des Verteilten Datenstromverarbeitungssystems NexusDS}},
booktitle = {Beitr{\"a}ge der 39. Jahrestagung der Gesellschaft f{\"u}r Informatik e.V. (GI)},
editor = {Stefan Fischer and Erik Maehle and R{\"u}diger Reischuk},
address = {Bonn},
publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
series = {Lecture Notes in Informatics},
volume = {P-154},
pages = {240--240},
type = {Workshop Paper},
month = {September},
year = {2009},
isbn = {978-3-88579-248-2},
keywords = {Data Stream Processing; Stream Databases; Middleware Platforms for Data Management; P2P and Networked Data Management; Database Services and Applications},
language = {German},
cr-category = {C.2 Computer-Communication Networks,
H.2 Database Management},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Kontextsensitive Informationssysteme verarbeiten h{\"a}ufig Daten der n{\"a}heren
Umgebung, die mit Hilfe von Sensoren kontinuierlich erhoben werden. F{\"u}r die
Verarbeitung kontinuierlicher Datenstr{\"o}me k{\"o}nnen Datenstromverarbeitungssysteme
eingesetzt werden. Jedoch m{\"u}ssen diese im Bereich der Kontextdatenverarbeitung
mit einem heterogenen Umfeld zurechtkommen. Je nach technischer Ausstattung der
physischen Umgebung sollten sie sich an die dort geltenden Bedingungen bzw.
Restriktionen anpassen. F{\"u}r verteilte Datenstromverarbeitungssysteme ist die
Anfrageverteilung in einem solchen Umfeld eine besondere Herausforderung, denn
das System muss Restriktionen auf verschiedenen Ebenen ber{\"u}cksichtigen. So
k{\"o}nnte ein Teil der Anfrage gewisse Anforderungen an seine Ausf{\"u}hrungsumgebung
haben, wie spezialisierte Hardware, oder es k{\"o}nnte aus Sicherheitsgr{\"u}nden
notwendig sein, die Anfrage oder einen Anfrageteil in einer sicheren
Ausf{\"u}hrungsumgebung auszuf{\"u}hren.
In diesem Papier klassifizieren wir Restriktionen, die auf verschiedenen Ebenen
der Anfrageverarbeitung des Systems vorkommen k{\"o}nnen. {\"U}berdies stellen wir ein
Konzept zur Modellierung der Restriktionsklassen vor und zeigen wie diese in
der Anfrageverarbeitung des verteilten Datenstromverarbeitungssystems NexusDS
ber{\"u}cksichtigt werden.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-119&engl=1}
}
@inproceedings {INPROC-2009-07,
author = {Ralph Lange and Nazario Cipriani and Lars Geiger and Matthias Gro{\ss}mann and Harald Weinschrott and Andreas Brodt and Matthias Wieland and Stamatia Rizou and Kurt Rothermel},
title = {{Making the World Wide Space Happen: New Challenges for the Nexus Context Platform}},
booktitle = {Proceedings of the 7th Annual IEEE International Conference on Pervasive Computing and Communications (PerCom '09). Galveston, TX, USA. March 2009},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {1--4},
type = {Conference Paper},
month = {March},
year = {2009},
doi = {10.1109/PERCOM.2009.4912782},
keywords = {Nexus; context; mobile context-aware applications; context-awareness; context management; World Wide Space; stream-processing; situation recognition; reasoning; workflows; quality of context},
language = {English},
cr-category = {H.2.8 Database Applications,
H.3.4 Information Storage and Retrieval Systems and Software,
H.3.5 Online Information Services},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-07/INPROC-2009-07.pdf,
http://www.nexus.uni-stuttgart.de/,
http://dx.doi.org/10.1109/PERCOM.2009.4912782},
contact = {ralph.lange@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Context-aware applications rely on models of the physical world. Within the
Nexus project, we envision a World Wide Space which provides the conceptual and
technological framework for integrating and sharing such context models in an
open, global platform of context providers. In our ongoing research we tackle
important challenges in such a platform including distributed processing of
streamed context data, situation recognition by distributed reasoning,
efficient management of context data histories, and quality of context
information. In this paper we discuss our approach to cope with these
challenges and present an extended Nexus architecture.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-07&engl=1}
}
@inproceedings {INPROC-2008-91,
author = {Lu, Jing and Mitschang, Bernhard},
title = {{An XQuery-based Trigger Service to Bring Consistency Management to Data Integration Systems}},
booktitle = {10th International Conference on Information Integration and Web-based Applications \& Services (iiWAS2008). Linz, Austria, November 24 - 26, 2008.},
address = {Linz, Austria},
publisher = {ACM Press},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--8},
type = {Conference Paper},
month = {November},
year = {2008},
isbn = {978-1-60558-349-5},
language = {English},
cr-category = {H.2.7 Database Administration},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Nowadays XML-based data integration systems are accepted as data service
providers on the web. In order to make such a data integration system fully
equipped with data manipulation capabilities, programming frameworks which
support update at the integration level are being developed. When the user is
permitted to submit updates, it is necessary to establish the best possible
data consistency in the whole data integration system. To that extend, we
present an approach based on an XQuery trigger service. We define an XQuery
trigger model together with its semantics. We report on the integration of the
XQuery trigger service into the overall architecture and discuss details of the
execution model. Experiments show that data consistency is enforced easily,
efficiently and conveniently at the global level.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-91&engl=1}
}
@inproceedings {INPROC-2008-86,
author = {Sylvia Radesch{\"u}tz and Bernhard Mitschang},
title = {{An Annotation Approach for the Matching of Process Variables and Operational Business Data Models}},
booktitle = {Proc. of the 21st International Conference on Computer Applications in Industry and Engineering (CAINE 2008)},
address = {Honolulu, USA},
publisher = {The International Society for Computers and Their Applications},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--6},
type = {Conference Paper},
month = {November},
year = {2008},
language = {English},
cr-category = {D.2.2 Software Engineering Design Tools and Techniques,
D.2.12 Software Engineering Interoperability,
H.4.1 Office Automation,
H.5.2 Information Interfaces and Presentation User Interfaces},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Efficient adaptation to new situations of a company's business and its business
processes plays an important role for achieving advantages in competition to
other companies. For an optimization of business processes, a profound analysis
of all relevant business data in the company is necessary. Analyses typically
specialize either on process analysis or on data warehousing of operational
business data. However, to achieve a significantly more detailed analysis in
order to fully optimize a company's business, a consolidation of all major
business data sources is indispensable.
This paper introduces an approach that allows consolidating process variables
and operational data models in a semi-automatic manner. In order to do this, a
semantic annotation is applied. In this paper, we focus on an ontology-based
annotation of the operational data in data warehouses, show how it is realized
in a tool and discuss its general usability in other areas.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-86&engl=1}
}
@inproceedings {INPROC-2008-82,
author = {Frank Wagner and Kathleen Krebs and Cataldo Mega and Bernhard Mitschang and Norbert Ritter},
title = {{Email Archiving and Discovery as a Service}},
booktitle = {Intelligent Distributed Computing, Systems and Applications; Proceedings of the 2nd International Symposium on Intelligent Distributed Computing: IDC 2008; Catania, Italy},
editor = {Costin Badica and Giuseppe Mangioni and Vincenza Carchiolo and Dumitru Dan Burdescu},
publisher = {Springer-Verlag},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Studies in Computational Intelligence},
volume = {162},
pages = {197--206},
type = {Conference Paper},
month = {September},
year = {2008},
isbn = {978-3-540-85256-8},
language = {English},
cr-category = {C.2.4 Distributed Systems,
H.3.2 Information Storage,
H.3.4 Information Storage and Retrieval Systems and Software},
contact = {Frank Wagner frank.wagner@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Corporate governance and legislative regulations are forcing companies to
extend their IT infrastructure by Email Archive and Discovery (EAD) systems for
compliance reasons. Praxis shows that every installation is different from
another; not only in terms of the execution infrastructure, but also in terms
of e.g. document and archiving procedures that map a company's own business
rules. As a consequence, EAD systems have to be highly customizable to their
intended usages.
For this purpose, we propose a service-oriented approach at various levels of
detail that, on one hand, allows for describing EAD properties at the abstract
(service) level and, on the other hand, supports the appropriate mapping of
these services to the existing execution infrastructure. In this paper, we
focus on the development and (architectural) design of an EAD system, which is
well suited to fulfill these requirements. On the long run, we consider this
solution as an important step on the way to an effective distributed and
scalable approach, which, as we think, can be achieved by appropriate
mechanisms of automatic workload management and dynamic provisioning of EAD
services based on e.g. grid technology.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-82&engl=1}
}
@inproceedings {INPROC-2008-81,
author = {Frank Wagner and Kathleen Krebs and Cataldo Mega and Bernhard Mitschang and Norbert Ritter},
title = {{Towards the Design of a Scalable Email Archiving and Discovery Solution}},
booktitle = {Proceedings of the 12th East-European Conference on Advances in Databases and Information Systems},
editor = {Paolo Atzeni and Albertas Caplinskas and Hannu Jaakkola},
publisher = {Springer-Verlag},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Lecture Notes in Computer Science},
pages = {305--320},
type = {Conference Paper},
month = {September},
year = {2008},
language = {English},
cr-category = {C.2.4 Distributed Systems,
H.3.2 Information Storage,
H.3.4 Information Storage and Retrieval Systems and Software},
contact = {Frank Wagner frank.wagner@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In this paper we propose a novel approach to specialize a general purpose
Enterprise Content Management (ECM) System into an Email Archiving and
Discovery (EAD) System. The magnitude and range of compliance risks associated
with the management of EAD is driving investment in the development of more
effective and efficient approaches to support regulatory compliance, legal
discovery and content life-cycle needs. Companies must recognize and address
requirements like legal compliance, electronic discovery, and document
retention management. What is needed today are EAD systems capable to process
very high message ingest rates, support distributed full text indexing, and
allow forensic search such to support litigation cases. All this must be
provided at lowest cost with respect to archive management and administration.
In our approach we introduce a virtualized ECM repository interface where the
key content repository components are wrapped into a set of tightly coupled
Grid service entities, such to achieve scale-out on a cluster of commodity
blade hardware that is automatically configured and dynamically provisioned. By
doing so we believe, we can leverage the strength of Relational Database
Management Systems and Full Text Indexes in a managed clustered environment
with minimal operational overhead.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-81&engl=1}
}
@inproceedings {INPROC-2008-51,
author = {Nicola H{\"o}nle and Matthias Gro{\ss}mann and Daniela Nicklas and Bernhard Mitschang},
title = {{Preprocessing Position Data of Mobile Objects}},
booktitle = {Proceedings of the 9th International Conference on Mobile Data Management (MDM'08); Beijing, China, April 27-30, 2008.},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {1--8},
type = {Conference Paper},
month = {April},
year = {2008},
isbn = {978-0-7695-3154-0},
language = {English},
cr-category = {H.2.8 Database Applications,
G.1.2 Numerical Analysis Approximation},
contact = {nicola.hoenle@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {We present the design and implementation of a component for the preprocessing
of position data taken from moving objects. The movement of mobile objects is
represented by piecewise functions over time that approximate the real object
movement and significantly reduce the initial data volume such that efficient
storage and analysis of object trajectories can be achieved. The maximal
acceptable deviation---an input parameter of our algorithms---of the
approximations also includes the uncertainty of the position sensor
measurements. We analyze and compare five different lossy preprocessing
methods. Our results clearly indicate that even with simple approaches, a more
than sufficient overall performance can be achieved.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-51&engl=1}
}
@inproceedings {INPROC-2008-50,
author = {Andreas Brodt and Daniela Nicklas and Sailesh Sathish and Bernhard Mitschang},
title = {{Context-Aware Mashups for Mobile Devices}},
booktitle = {Web Information Systems Engineering -- WISE 2008 9th International Conference on Web Information Systems Engineering, Auckland, New Zealand, September 1-3, 2008, Proceedings},
publisher = {Springer-Verlag},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
series = {Lecture Notes in Computer Science},
type = {Conference Paper},
month = {January},
year = {2008},
keywords = {mashup, location-based services, Delivery Context Client Interfaces, DCCI, AJAX, context provisioning},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
D.2.12 Software Engineering Interoperability,
H.5.1 Multimedia Information Systems,
H.5.4 Hypertext/Hypermedia,
H.2.5 Heterogeneous Databases},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2008-50/INPROC-2008-50.pdf},
contact = {andreas.brodt@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {With the Web 2.0 trend and its participation of end-users more and more data
and information services are online accessible, such as web sites, Wikis, or
web services. So-called mashups---web applications that integrate data from
more than one source into an integrated service---can be easily realized using
scripting languages. Also, mobile devices are increasingly powerful, have
ubiquitous access to the Web and feature local sensors, such as GPS. Thus,
mobile applications can adapt to the mobile user's current situation.
We examine how context-aware mashups can be created. One challenge is the
provisioning of context data to the mobile application. For this, we discuss
different ways to integrate context data, such as the user's position, into web
applications. Moreover, we assess different data formats and the overall
performance. Finally, we present the Telar Mashup Platform, a client-server
solution for location-based mashups for mobile devices such as the Nokia N810
Internet Tablet.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-50&engl=1}
}
@inproceedings {INPROC-2008-49,
author = {Matthias Gro{\ss}mann and Nicola H{\"o}nle and Daniela Nicklas and Bernhard Mitschang},
title = {{Reference Management in a Loosely Coupled, Distributed Information System}},
booktitle = {Proceedings of the 12th East-European Conference on Advances in Databases and Information Systems},
editor = {Paolo Atzeni and Albertas Caplinskas and Hannu Jaakkola},
publisher = {Springer-Verlag},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
series = {Lecture Notes in Computer Science},
type = {Conference Paper},
month = {September},
year = {2008},
language = {English},
cr-category = {E.2 Data Storage Representations,
H.2.2 Database Management Physical Design,
H.2.4 Database Management Systems},
contact = {Matthias Grossmann matthias.grossmann@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {References between objects in loosely coupled distributed information systems
pose a problem. On the one hand, one tries to avoid referential inconsistencies
like, e.g., dangling links in the WWW. On the other hand, using strict
constraints as in databases may restrict the data providers severely. We
present the solution to this problem that we developed for the Nexus system.
The approach tolerates referential inconsistencies in the data while providing
consistent query answers to users. For traversing references, we present a
concept based on return references. This concept is especially suitable for
infrequent object migrations and provides a good query performance. For
scenarios where object migrations are frequent, we developed an alternative
concept based on a distributed hash table.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-49&engl=1}
}
@inproceedings {INPROC-2008-46,
author = {Thorsten Scheibler and Ralph Mietzner and Frank Leymann},
title = {{EAI as a Service - Combining the Power of Executable EAI Patterns and SaaS}},
booktitle = {International EDOC Conference (EDOC 2008)},
publisher = {Springer},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--10},
type = {Conference Paper},
month = {September},
year = {2008},
language = {English},
cr-category = {H.4.1 Office Automation},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {One of the predominant problems IT companies are facing today is Enterprise
Application Integration (EAI). Most of the infrastructures built to tackle
integration issues are proprietary because no standards exist for how to model,
develop, and actually execute integration scenarios. Moreover, those systems
are built on top of Pipes-and-Filters architecture that offers only limited
capabilities for productive environments. As Service-oriented architectures
(SOA) can be seen as de-facto standard for building enterprise systems today,
including integration systems, there is a need to utilize those systems for
executing integration scenarios. Business processes in an SOA environment can
be used to integrate various applications to form an integration solution. Thus
the application domain of BPM is significantly extended. In this paper, we
introduce how integration solutions can be executed on BPM infrastructures. To
demonstrate this we introduce a tool supporting integration architects to
design integration scenarios and execute these solutions automatically.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-46&engl=1}
}
@inproceedings {INPROC-2008-32,
author = {Sylvia Radesch{\"u}tz and Florian Niedermann and Bernhard Mitschang},
title = {{Ein Annotationsansatz zur Unterst{\"u}tzung einer ganzheitlichen Gesch{\"a}ftsanalyse}},
booktitle = {Proc. of the 5th Conference on Data Warehousing: Synergien durch Integration und Informationslogistik. (DW2008); St. Gallen, 27.-28. Oktober, 2008},
publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
series = {Lecture Notes in Informatics (LNI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--19},
type = {Conference Paper},
month = {October},
year = {2008},
language = {German},
cr-category = {D.2.2 Software Engineering Design Tools and Techniques,
D.2.12 Software Engineering Interoperability,
H.4.1 Office Automation,
H.5.2 Information Interfaces and Presentation User Interfaces},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Die Verbesserung der Gesch{\"a}ftsprozesse in einem Unternehmen spielt eine immer
wichtigere Rolle, um Wettbewerbsvorteile gegen{\"u}ber der Konkurrenz zu erlangen.
Daf{\"u}r ist eine umfassende Analyse n{\"o}tig {\"u}ber alle verf{\"u}gbaren Informationen in
diesem Unternehmen. Aktuelle Verfahren konzentrieren sich entweder auf die
Analyse von Prozessdaten oder die Analyse von operativen Anwendungsdaten, die
typischerweise in einem Data Warehouse vorliegen. F{\"u}r die Ausf{\"u}hrung einer
tiefergehenden Analyse ist es jedoch notwendig, Prozessdaten und operative
Daten zu verkn{\"u}pfen. Dieser Beitrag stellt zwei Ans{\"a}tze vor, welche es
erm{\"o}glichen, diese Daten effektiv und flexibel zusammenzuf{\"u}hren. Der erste
Ansatz stellt eine direkte Verkn{\"u}pfung von Entit{\"a}ten aus den Prozessdaten mit
Entit{\"a}ten aus den operativen Daten her. Die Verkn{\"u}pfung im zweiten Ansatz
beruht hingegen auf der semantischen Beschreibung der Daten. Beide Methoden
sind in einem Werkzeug realisiert.},
internal-note = {NOTE(review): original had publisher = {Lecture Notes in Informatics (LNI)}; LNI is a series published by the GI -- confirm publisher/series split against the actual proceedings},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-32&engl=1}
}
@inproceedings {INPROC-2008-21,
author = {Andreas Brodt and Daniela Nicklas},
title = {{The TELAR mobile mashup platform for Nokia Internet Tablets}},
booktitle = {EDBT '08: Proceedings of the 11th international conference on Extending database technology; Nantes, France, March 25-29, 2008},
editor = {{ACM}},
address = {New York, NY, USA},
publisher = {ACM},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
series = {ACM International Conference Proceeding Series},
volume = {261},
pages = {700--704},
type = {Conference Paper},
month = {March},
year = {2008},
doi = {10.1145/1353343.1353429},
isbn = {978-1-59593-926-5},
keywords = {location-based services, GPS, DCCI, mashup},
language = {English},
cr-category = {H.2.5 Heterogeneous Databases,
H.5.1 Multimedia Information Systems,
H.5.4 Hypertext/Hypermedia},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2008-21/INPROC-2008-21.pdf},
contact = {andreas.brodt@ipvs.uni-stuttgart.de, daniela.nicklas@uni-oldenburg.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {With the Web 2.0 trend and its participation of end-users more and more data
and information services are online accessible, such as web sites, Wikis, or
web services. The integration of this plethora of information is taken over by
the community: so-called Mashups---web applications that combine data from more
than one source into an integrated service---spring up like mushrooms, because
they can be easily realized using script languages and web development
platforms. Another trend is that mobile devices that get more and more powerful
have ubiquitous access to the Web. Local sensors (such as GPS) can easily be
connected to these devices. Thus, mobile applications can adapt to the current
situation of the user, which can change frequently because of his or her
mobility.
In this demonstration, we present the Telar Mashup platform, a client-server
solution that facilitates the creation of adaptive Mashups for mobile devices
such as the Nokia Internet Tablets. On the server side, wrappers allow the
integration of data from web-based services. On the client side, a simple
implementation of the DCCI specification is used to integrate context
information of local sensors into the mobile Web browser, which adapts the
Mashup to the user's current location. We show an adaptive, mobile Mashup on
the Nokia N810 Internet Tablet.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-21&engl=1}
}
@inproceedings {INPROC-2008-18,
author = {Daniela Nicklas and Matthias Grossmann and Jorge Minguez and Mattias Wieland},
title = {{Adding High-level Reasoning to Efficient Low-level Context Management: a Hybrid Approach}},
booktitle = {Proceedings of the Sixth Annual IEEE Conference on Pervasive Computing and Communications : PerCom'08 Workshops, in 5th IEEE PerCom Workshop on Context Modeling and Reasoning; Hongkong, 17.-21. March 2008},
address = {Los Alamitos - California, Washington, Tokyo},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
series = {IEEE Computer Society},
volume = {Order Number E3113},
pages = {447--452},
type = {Workshop Paper},
month = {March},
year = {2008},
isbn = {0-7695-3113-X},
isbn-13 = {978-0-7695-3113-7},
keywords = {higher level context; pervasive computing; ubiquitous computing; context-aware applications},
language = {English},
cr-category = {D.1.6 Logic Programming,
D.2.11 Software Engineering Software Architectures,
H.2.8 Database Applications},
ee = {http://www.nexus.uni-stuttgart.de/COMOREA,
http://www.nexus.uni-stuttgart.de},
contact = {Daniela Nicklas dnicklas@acm.org},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {Rule-based context reasoning is an expressive way to define situations, which
are crucial for the implementation of many context-aware applications. Along
the scenario of the Conference Guard application we show how this reasoning can
be done both by leveraging an efficient context management (realized by the
Nexus platform) and by a generic rule based service. We present the
architecture of the Nexus semantic service, which uses the underlying
definition of a low-level context model (the Nexus Augmented World Model) to
carry out rules given in first order logic. We realize this service in a
straightforward manner by using state-of-the-art software components (the Jena
2 framework) and evaluate the number of instances this approach can handle. Our
first experiences show that a pre-selection of instances is necessary if the
semantic service should work on a large-scale context model.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-18&engl=1}
}
@inproceedings {INPROC-2008-150,
author = {Laura Kassner and Vivi Nastase and Michael Strube},
title = {{Acquiring a Taxonomy from the German Wikipedia}},
booktitle = {Proceedings of the Sixth International Conference on Language Resources and Evaluation (LREC'08)},
editor = {Nicoletta Calzolari},
publisher = {European Language Resources Association (ELRA)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--4},
type = {Conference Paper},
month = {May},
year = {2008},
isbn = {2-9517408-4-0},
keywords = {taxonomy; ontology; taxonomy generation; ontology generation; semantic network; Wikipedia; WordNet; GermaNet; multilinguality},
language = {English},
cr-category = {I.2.4 Knowledge Representation Formalisms and Methods,
I.2.7 Natural Language Processing},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2008-150/INPROC-2008-150.pdf,
http://www.lrec-conf.org/proceedings/lrec2008/},
contact = {laura.kassner@gsame.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {This paper presents the process of acquiring a large, domain independent,
taxonomy from the German Wikipedia. We build upon a previously implemented
platform that extracts a semantic network and taxonomy from the English version
of the Wikipedia. We describe two accomplishments of our work: the semantic
network for the German language in which isa links are identified and annotated,
and an expansion of the platform for easy adaptation for a new language. We
identify the platform's strengths and shortcomings, which stem from the
scarcity of free processing resources for languages other than English. We show
that the taxonomy induction process is highly reliable - evaluated against the
German version of WordNet, GermaNet, the resource obtained shows an accuracy of
83.34\%.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-150&engl=1}
}
@inproceedings {INPROC-2008-112,
author = {Steffen Volz and Daniela Nicklas and Matthias Grossmann and Matthias Wieland},
title = {{On creating a spatial integration schema for global, context-aware applications}},
booktitle = {Proceedings of the X Brazilian Symposium on GeoInformatics (GeoInfo2008)},
publisher = {INPE},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
type = {Conference Paper},
month = {December},
year = {2008},
language = {English},
cr-category = {H.2.4 Database Management Systems,
H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Architecture of Application Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The world of spatial data is split into individual data source islands that
have different thematic or spatial focuses. When attempting to integrate those
data sources, severe challenges arise, since for most GIS application domains a
spatial integration schema does not exist. This is also true for the newly
emerging domain of mobile, context-aware applications. Since the users of these
systems are mobile, transborder access to spatial data or context models is
crucial for global deployment. The basis for this work is the Nexus Augmented
World Schema, a conceptual schema that serves as an integration standard for
autonomous spatial context servers. This paper analyzes some major spatial data
standards, especially with respect to the requirements of a spatial integration
schema for context-aware applications and illustrates the Nexus approach.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-112&engl=1}
}
@inproceedings {INPROC-2008-05,
author = {Sylvia Radesch{\"u}tz and Bernhard Mitschang and Frank Leymann},
title = {{Matching of Process Data and Operational Data for a Deep Business Analysis}},
booktitle = {Proc. of the 4th International Conference on Interoperability for Enterprise Software and Applications (I-ESA 2008), Berlin, M{\"a}rz 26-28, 2008.},
address = {London},
publisher = {Springer-Verlag},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {171--182},
type = {Conference Paper},
month = {March},
year = {2008},
doi = {10.1007/978-1-84800-221-0_14},
language = {English},
cr-category = {H.2.4 Database Management Systems},
ee = {http://www.aidima.es/iesa2008/},
department = {University of Stuttgart, Institute of Architecture of Application Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Efficient adaptation to new situations of a company's business and its business
processes plays an important role for achieving advantages in competition to
other companies. For an optimization of processes, a profound analysis of all
relevant information in the company is necessary. Analyses typically specialize
either on process analysis or on data warehousing of operational data. A
consolidation of business data is needed, i.e. of internal process execution
data and external operational data, in order to allow for interoperability
between these major business data sources to analyze and optimize processes in
a much more comprehensive scope. This paper introduces a framework that offers
various data descriptions to reach an efficient matching of process data and
operational data, and shows its enhancement compared to separate analyses and
other matching approaches.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-05&engl=1}
}
@inproceedings {INPROC-2008-04,
author = {Jing Lu and Bernhard Mitschang},
title = {{A Constraint-Aware Query Optimizer for Web-based Data Integration}},
booktitle = {Proceedings of the Fourth International Conference on Web Information Systems and Technologies, May 4-7, 2008.},
address = {Funchal, Madeira, Portugal},
publisher = {Conference Proceedings},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--6},
type = {Conference Paper},
month = {May},
year = {2008},
language = {English},
cr-category = {H.3.5 Online Information Services},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Web has brought forth opportunities to connect information sources across all
types of boundaries. The information sources include databases, XML documents,
and other unstructured sources. Data integration is to combine data residing at
different sources and providing the user with a unified view of these data.
Currently users are expecting more efficient services from such data
integration systems. Indeed, querying multiple data sources scattered on the
web encounters many barriers for achieving efficiency due to the heterogeneity
and autonomy of the information sources. This paper describes a query
optimizer, which uses constraints to semantically optimize the queries. The
optimizer first translates constraints from data sources into constraints
expressed at the global level, e.g., in the common schema, and stores them in
the constraint repository, again, at the global level. Then the optimizer can
use semantic query optimization technologies including detection of empty
results, join elimination, and predicate elimination to generate a more
efficient but semantically equivalent query for the user. The optimizer is
published as a web service and can be invoked by many data integration systems.
We carry out experiments using our semantic query optimizer and first results
show that performance can be greatly improved.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-04&engl=1}
}
@inproceedings {INPROC-2008-02,
author = {Marko Vrhovnik and Holger Schwarz and Sylvia Radesch{\"u}tz and Bernhard Mitschang},
title = {{An Overview of SQL Support in Workflow Products}},
booktitle = {Proc. of the 24th International Conference on Data Engineering (ICDE 2008), Canc{\'u}n, M{\'e}xico, April 7-12, 2008},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--8},
type = {Conference Paper},
month = {April},
year = {2008},
language = {English},
cr-category = {H.2.4 Database Management Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Over the last years, data management products as well as workflow products have
established themselves as indispensable building blocks for advanced IT systems
in almost all application areas. Recently, many vendors have created innovative
product extensions that combine service-oriented frameworks with powerful
workflow and data management capabilities.
In this paper, we discuss several workflow products from different vendors with
a specific focus on their SQL support. We provide a comparison based on a set
of important data management patterns and illustrate the characteristics of
various approaches by means of a running example.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-02&engl=1}
}
@inproceedings {INPROC-2008-01,
author = {Marko Vrhovnik and Holger Schwarz and Stephan Ewen and Oliver Suhre},
title = {{PGM/F: A Framework for the Optimization of Data Processing in Business Processes}},
booktitle = {Proc. of the 24th International Conference on Data Engineering (ICDE 2008), Canc{\'u}n, M{\'e}xico, April 7-12, 2008},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--4},
type = {Conference Paper},
month = {April},
year = {2008},
language = {English},
cr-category = {H.2.4 Database Management Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Business process execution languages like BPEL are broadly adopted by industry
to integrate the heterogeneous applications and data stores of an enterprise.
Leading vendors provide extensions to BPEL that allow for a tight integration
of data processing capabilities into the process logic. Business processes
exploiting these capabilities show a remarkable potential for optimization. In
this demonstration, we present PGMOF, a framework for the optimization of data
processing in such business processes.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2008-01&engl=1}
}
@inproceedings {INPROC-2007-66,
author = {Mih{\'a}ly Jakob and Oliver Schiller and Holger Schwarz and Fabian Kaiser},
title = {{flashWeb: Graphical Modeling of Web Applications for Data Management}},
booktitle = {Tutorials, posters, panels and industrial contributions at the 26th International Conference on Conceptual Modeling - ER 2007, Auckland, New Zealand, December 2007. Vol. 83},
editor = {John Grundy and Sven Hartmann and Alberto H. F. Laender and Leszek Maciaszek and John F. Roddick},
address = {Auckland, New Zealand},
publisher = {ACS},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {59--64},
type = {Conference Paper},
month = {December},
year = {2007},
isbn = {978-1-920682-64-4},
keywords = {Model-driven Web engineering; Web application modeling; Code Generation},
language = {English},
cr-category = {D.2.3 Software Engineering Coding Tools and Techniques,
D.2.11 Software Engineering Software Architectures,
H.4 Information Systems Applications,
H.5.4 Hypertext/Hypermedia},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2007-66/INPROC-2007-66.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {This paper presents flashWeb, a Computer-Aided Web Engineering (CAWE) tool for
the model-driven development of web applications that focus on data management.
Present-day web applications, like on-line auction systems or enterprise web
portals require comprehensive data access, data processing and data
manipulation capabilities. However, existing web application development
approaches treat data management operations as second-class citizens. They
integrate data operations into existing models or derive them as a by-product
of business processes. We argue that data management is an important part of
the application logic hence we capture operations with an additional Operation
Model. We show that the explicit modeling of operations provides many benefits
that distinguish our solution from other approaches. We present the flashWeb
development process utilizing a graphical notation for the models in use, a
CAWE tool that supports the creation of the graphical models and a code
generator that creates ready-to-run web applications.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-66&engl=1}
}
@inproceedings {INPROC-2007-64,
author = {Nazario Cipriani and Matthias Grossmann and Daniela Nicklas and Bernhard Mitschang},
title = {{Federated Spatial Cursors}},
booktitle = {Proceedings of the IX Brazilian Symposium on Geoinformatics},
address = {S{\~a}o Jos{\'e} dos Campos, Brazil},
publisher = {Online},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {85--96},
type = {Conference Paper},
month = {November},
year = {2007},
isbn = {978-85-17-00036-2},
keywords = {nexus; cursor; federation; spatial},
language = {English},
cr-category = {H.2.4 Database Management Systems,
H.3.4 Information Storage and Retrieval Systems and Software},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2007-64/INPROC-2007-64.pdf,
http://www.geoinfo.info/geoinfo2007/anais_geoinfo2007.pdf},
contact = {Senden Sie eine E-mail an Nazario.Cipriani@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The usage of small mobile devices for data-intensive applications becomes more
and more self-evident. As a consequence we have to consider these devices and
their inherent characteristics in future system designs, like the limitations
of memory and communication bandwidth. For example, when querying data servers
for information, a mobile application can hardly anticipate the size of the
result set. Our approach is to give more control over the data delivery process
to the application, so that it can be adapted regarding its device status, the
costs and availability of communication channels, and the user's needs. This
paper introduces a flexible and scalable approach by providing spatially
federated cursor functionality. It is based on an open federation over a set of
loosely coupled data sources that provide simple object retrieval interfaces.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-64&engl=1}
}
@inproceedings {INPROC-2007-61,
author = {Fabian Kaiser and Holger Schwarz and Mih{\'a}ly Jakob},
title = {{EXPOSE: Searching the Web for Expertise}},
booktitle = {Proceedings of the 30th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, Amsterdam, The Netherlands, July 23-27, 2007.},
editor = {Wessel Kraaij and Arjen P. de Vries and Charles L. A. Clarke and Norbert Fuhr and Noriko Kando},
publisher = {ACM},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--1},
type = {Conference Paper},
month = {January},
year = {2007},
isbn = {978-1-59593-597-7},
keywords = {Expert Finding; Search Engine; Information Retrieval; Web Search; Knowledge Management},
language = {English},
cr-category = {H.3.3 Information Search and Retrieval},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In this demonstration we will present EXPOSE, our solution to finding experts
on the web. We show how EXPOSE supports the user in diverse tasks throughout
the whole search process and how using EXPOSE can improve the result quality
compared to ad-hoc searches with common web search engines.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-61&engl=1}
}
@inproceedings {INPROC-2007-59,
author = {Jing Lu and Bernhard Mitschang},
title = {{DIS-CS: Improving Enterprise Data Integration by Constraint Service}},
booktitle = {ISCA 20th INTERNATIONAL CONFERENCE ON COMPUTER APPLICATIONS IN INDUSTRY AND ENGINEERING, November 7-9, 2007, San Francisco, California, USA.},
publisher = {The International Society for Computers and Their Applications},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {212--217},
type = {Conference Paper},
month = {November},
year = {2007},
isbn = {978-1-880843-65-9},
language = {English},
cr-category = {H.2.4 Database Management Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {This paper presents an approach to provide a Constraint Service for XML-based
Data Integration Systems. Constraints from local data sources and global
constraints are expressed in a uniform constraint model based on Active XQuery
and are stored in Constraint Repository. We introduce the concept of Constraint
Wrapper and translate local constraints from data sources automatically into
the uniform constraint model through Constraint Wrapper with the help of schema
mapping information. The Constraint Service can be published as web service and
be invoked by different Data Integration Systems. Through the Constraint
Service, queries can be optimized, and global updates can be checked for
validity and integrity. We establish the DIS-CS system to facilitate both the
architecture and the implementation. We carry out experiments of Semantic Query
Optimization using Constraints. The results show that the performance of the
queries is greatly improved.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-59&engl=1}
}
@inproceedings {INPROC-2007-37,
author = {Daniela Nicklas},
title = {{Nexus---A Global, Active, and 3D Augmented Reality Model}},
booktitle = {Photogrammetic Week '07},
editor = {Dieter Fritsch},
address = {Heidelberg},
publisher = {Herbert Wichmann Verlag},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {325--334},
type = {Conference Paper},
month = {September},
year = {2007},
isbn = {978-3-87907-452-5},
keywords = {Nexus; location-based services; Augmented Reality; Geographic Models; context-awareness},
language = {English},
cr-category = {H.2.4 Database Management Systems,
H.2.8 Database Applications},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2007-37/INPROC-2007-37.pdf,
http://www.nexus.uni-stuttgart.de},
contact = {nicklas@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The proliferation of sensor technology, along with advances in wireless
communication and mobile devices, allow context-aware applications. These
applications need context information, which should be shared between different
applications to reduce the effort of modeling, obtaining, and managing. In the
Nexus project, a context sharing platform was developed that allows for global,
open context access of different applications. In this paper, different aspects
of this work are illustrated: how a global access can be achieved by a
federation approach, how these models can be active to support event
observation and notification, how the third dimension can be introduced in the
modeling and finally, how this leads to advanced applications that augment our
reality with virtual information. Finally, an outlook into the vision of the
World Wide Space is given.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-37&engl=1}
}
@inproceedings {INPROC-2007-29,
author = {Rodrigo Monteiro and Geraldo Zimbrao and Holger Schwarz and Bernhard Mitschang and Jano Souza},
title = {{DWFIST: Leveraging Calendar-based Pattern Mining in Data Streams}},
booktitle = {Proc. of the 9th International Conference on Data Warehousing and Knowledge Discovery (DaWaK 2007) Regensburg, Germany, 3-7 September, 2007},
publisher = {Springer},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {438--448},
type = {Conference Paper},
month = {September},
year = {2007},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Calendar-based pattern mining aims at identifying patterns on specific calendar
partitions. Potential calendar partitions are for example: every Monday, every
first working day of each month, every holiday. Providing flexible mining
capabilities for calendar-based partitions is especially challenging in a data
stream scenario. The calendar partitions of interest are not known a priori and
at each point in time only a subset of the detailed data is available. We show
how a data warehouse approach can be applied to this problem. The data
warehouse that keeps track of frequent itemsets holding on different partitions
of the original stream has low storage requirements. Nevertheless, it allows to
derive sets of patterns that are complete and precise. This work demonstrates
the effectiveness of our approach by a series of experiments.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-29&engl=1}
}
@inproceedings {INPROC-2007-28,
author = {Marko Vrhovnik and Holger Schwarz and Oliver Suhre and Bernhard Mitschang and Volker Markl and Albert Maier and Tobias Kraft},
title = {{An Approach to Optimize Data Processing in Business Processes}},
booktitle = {Proc. of the 33rd International Conference on Very Large Data Bases (VLDB 2007), Vienna, Austria, September 23-28, 2007},
publisher = {VLDB Endowment},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--12},
type = {Conference Paper},
month = {September},
year = {2007},
language = {English},
cr-category = {H.2.4 Database Management Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In order to optimize their revenues and profits, an increasing number of
businesses organize their business activities in terms of business processes.
Typically, they automate important business tasks by orchestrating a number of
applications and data stores. Obviously, the performance of a business process
is directly dependent on the efficiency of data access, data processing, and
data management.
In this paper, we propose a framework for the optimization of data processing
in business processes. We introduce a set of rewrite rules that transform a
business process in such a way that an improved execution with respect to data
management can be achieved without changing the semantics of the original
process. These rewrite rules are based on a semi-procedural process graph model
that externalizes data dependencies as well as control flow dependencies of a
business process. Furthermore, we present a multi-stage control strategy for
the optimization process. We illustrate the benefits and opportunities of our
approach through a prototype implementation. Our experimental results
demonstrate that independent of the underlying database system performance
gains of orders of magnitude are achievable by reasoning about data and control
in a unified framework.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-28&engl=1}
}
@inproceedings {INPROC-2007-18,
author = {Matthias Wieland and Oliver Kopp and Daniela Nicklas and Frank Leymann},
title = {{Towards Context-Aware Workflows}},
booktitle = {CAiSE'07 Proceedings of the Workshops and Doctoral Consortium Vol.2, Trondheim, Norway, June 11-15th, 2007},
editor = {Barbara Pernici and Jon Atle Gulla},
publisher = {Tapir Academic Press},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {577--591},
type = {Workshop Paper},
month = {June},
year = {2007},
isbn = {978-82-519-2246-3},
keywords = {workflow systems; context-aware systems; ubiquitous systems; workflow modeling; development of context-aware applications; BPEL; Nexus},
language = {English},
cr-category = {H.4.1 Office Automation},
contact = {Matthias Wieland wielanms@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Architecture of Application Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Context-aware applications adapt their behavior based on changes of the
physical world, which is often obtained with a huge amount of sensors. Thus,
the development of such applications is cumbersome, in particular the
implementation of their often complex control flow. To ease the development of
context-aware applications we present the concept of context-aware workflows.
Thereafter we present an implementation of these concepts based on a standard
workflow language. Context-aware workflows are not only interesting for the
development of context-aware applications, but also enable workflow technology
to be applied in new domains that are process oriented and yet not supported by
workflow systems like production processes in the manufacturing industry. The
concept of context-aware workflows is a first approach that enables modeling
and execution of technical production processes with workflow systems normally
used for business processes.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-18&engl=1}
}
@inproceedings {INPROC-2007-125,
author = {Nazario Cipriani and Matthias Gro{\ss}mann and Daniela Nicklas and Bernhard Mitschang},
title = {{Federated Spatial Cursors}},
booktitle = {IX Brazilian Symposium on Geoinformatics, 25-28 November, Campos do Jordao, Sao Paulo, Brazil},
publisher = {INPE},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {85--96},
type = {Conference Paper},
month = {November},
year = {2007},
language = {English},
cr-category = {C.2.4 Distributed Systems,
H.2.4 Database Management Systems,
H.3.3 Information Search and Retrieval},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The usage of small mobile devices for data-intensive applications becomes more
and more self-evident. As a consequence we have to consider these devices and
their inherent characteristics in future system designs, like the limitations
of memory and communication bandwidth. For example, when querying data servers
for information, a mobile application can hardly anticipate the size of the
result set. Our approach is to give more control over the data delivery process
to the application, so that it can be adapted regarding its device status, the
costs and availability of communication channels, and the user's needs. This
paper introduces a flexible and scalable approach by providing spatially
federated cursor functionality. It is based on an open federation over a set of
loosely coupled data sources that provide simple object retrieval interfaces.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-125&engl=1}
}
@inproceedings {INPROC-2007-108,
author = {Ralf Wagner and Bernhard Mitschang},
title = {{A Methodology and Guide for Effective Reuse in Integration Architectures for Enterprise Applications}},
booktitle = {Distributed Objects and Applications (DOA) 2007 International Conference},
publisher = {Springer},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {522--539},
type = {Conference Paper},
month = {November},
year = {2007},
language = {English},
cr-category = {A.0 General Literature, General},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-108&engl=1}
}
@inproceedings {INPROC-2007-107,
author = {Ralf Wagner and Bernhard Mitschang},
title = {{Flexible Reuse of Middleware Infrastructures in Heterogeneous IT Environments}},
booktitle = {Proceedings of the IEEE International Conference on Information Reuse and Integration},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {323--328},
type = {Conference Paper},
month = {August},
year = {2007},
language = {English},
cr-category = {A.0 General Literature, General},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-107&engl=1}
}
@inproceedings {INPROC-2007-106,
author = {Ralf Wagner and Bernhard Mitschang},
title = {{A Virtualization Approach for Reusing Middleware Adapters}},
booktitle = {Proceedings of the Ninth International Conference on Enterprise Information Systems},
publisher = {.},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {78--85},
type = {Conference Paper},
month = {June},
year = {2007},
language = {English},
cr-category = {A.0 General Literature, General},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-106&engl=1}
}
@inproceedings {INPROC-2007-105,
author = {Tobias Kraft and Holger Schwarz and Bernhard Mitschang},
title = {{A Statistics Propagation Approach to Enable Cost-Based Optimization of Statement Sequences}},
booktitle = {Proc. of the 11th East European Conference on Advances in Databases and Information Systems (ADBIS 2007), Varna, Bulgaria, September 29 - October 3, 2007},
publisher = {-},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {267--282},
type = {Conference Paper},
month = {September},
year = {2007},
language = {English},
cr-category = {H.2.4 Database Management Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-105&engl=1}
}
@inproceedings {INPROC-2007-104,
author = {Tobias Kraft and Bernhard Mitschang},
title = {{Statistics API: DBMS-Independent Access and Management of DBMS Statistics in Heterogeneous Environments}},
booktitle = {Proc. of the 9th International Conference on Enterprise Information Systems (ICEIS 2007), Funchal, Madeira, Portugal, June 12-16, 2007},
publisher = {-},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {5--12},
type = {Conference Paper},
month = {June},
year = {2007},
language = {English},
cr-category = {H.2.4 Database Management Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-104&engl=1}
}
@inproceedings {INPROC-2007-103,
author = {Tobias Kraft},
title = {{A Cost-Estimation Component for Statement Sequences}},
booktitle = {Proc. of the 33rd International Conference on Very Large Data Bases (VLDB 2007), Vienna, Austria, September 23-28, 2007},
publisher = {-},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1382--1385},
type = {Conference Paper},
month = {September},
year = {2007},
language = {English},
cr-category = {H.2.4 Database Management Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-103&engl=1}
}
@inproceedings {INPROC-2007-03,
author = {Clemens Dorda and Uwe Heinkel and Bernhard Mitschang},
title = {{Improving Application Integration with Model-Driven Engineering}},
booktitle = {Proceedings of International Conference on Information Technology and Management 2007 : ICITM 2007; Hong Kong, China, January 3-5, 2007},
editor = {Chan Man-Chung and James N.K. Liu and Ronnie Cheung and Joe Zhou},
address = {Hong Kong},
publisher = {ISM Press},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {94--101},
type = {Conference Paper},
month = {January},
year = {2007},
isbn = {988-97311-5-0},
keywords = {Enterprise Application Integration, Model-Driven Engineering, Software Lifecycle, EAI, MDA, MDE, UML, Unified Modeling Language},
language = {English},
cr-category = {D.2.2 Software Engineering Design Tools and Techniques,
D.2.13 Software Engineering Reusable Software,
I.6.5 Model Development},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2007-03/INPROC-2007-03.pdf},
contact = {Write message to Clemens.Dorda@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Modern software for Enterprise Application Integration (EAI) provides tools for
modeling integration scenarios. A drawback of these tools is the missing
functionality to exchange or integrate models of different EAI products.
Consequently, developers can describe real heterogeneous IT environments only
partially. Our goal is to avoid the creation of these so-called
``integration islands''. For that purpose we present an approach which introduces
an abstract view by technology-independent and multivendor-capable modeling for
both development and maintenance. With this approach, we propose a toolset-
and repository-based refinement of the abstract view to automate
the implementation with real products and the deployment on real platforms.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2007-03&engl=1}
}
@inproceedings {INPROC-2006-57,
author = {Christoph Mangold and Holger Schwarz},
title = {{Documents meet Databases: A System for Intranet Search}},
booktitle = {13th International Conference on Management of Data (COMAD 2006), Delhi, India, December 14-16, 2006},
editor = {L. V. S. Lakshmanan and P. Roy and A. K. H. Tung},
address = {New Delhi},
publisher = {Tata McGraw-Hill Publishing Company Limited},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {227--230},
type = {Conference Paper},
month = {December},
year = {2006},
isbn = {0-07-063374-6},
language = {English},
cr-category = {H.3.3 Information Search and Retrieval},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2006-57/INPROC-2006-57.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In enterprise intranets, information is encoded in documents and databases.
Logically, the information in both worlds is tightly connected, however, on the
system level there is usually a large gap. In this paper, we propose a system
to retrieve documents in the enterprise intranet. The system is an extension to
common text search. It does not only consider the content of documents but also
it exploits the enterprise databases to determine the documents' context.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2006-57&engl=1}
}
@inproceedings {INPROC-2006-56,
author = {Christoph Mangold and Holger Schwarz and Bernhard Mitschang},
title = {{Symbiosis in the Intranet: How Document Retrieval Benefits from Database Information}},
booktitle = {13th International Conference on Management of Data (COMAD 2006), December 14-16, 2006, Delhi, India},
editor = {L. V. S. Lakshmanan and P. Roy and A. K. H. Tung},
address = {New Delhi},
publisher = {Tata McGraw-Hill Publishing Company Limited},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {201--204},
type = {Conference Paper},
month = {December},
year = {2006},
isbn = {0-07-063374-6},
language = {English},
cr-category = {H.3.3 Information Search and Retrieval},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2006-56/INPROC-2006-56.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The enterprise information space is split in two hemispheres. Documents contain
unstructured or semistructured information; structured information is stored in
databases. As regards the content, both kinds of information are complementary
parts. However, enterprise information systems usually focus on one part, only.
Our approach improves document retrieval in the intranet by exploiting the
enterprise's databases. In particular, we exploit database information to
describe the context of documents and exploit this context to enhance common
full text search. In this paper, we show how to model and compute document
context and present results on runtime performance.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2006-56&engl=1}
}
@inproceedings {INPROC-2006-54,
author = {Thomas Schwarz and Matthias Grossmann and Daniela Nicklas and Bernhard Mitschang},
title = {{Exploiting Type and Space in a Main Memory Query Engine}},
booktitle = {Proceedings of the VIII Brazilian Symposium on GeoInformatics : GeoInfo2006 ; Campos do Jord{\~a}o, Brazil, November 19-22, 2006},
editor = {Clodoveu Augusto Davis Junior and Antonio Miguel Vieira Monteiro},
publisher = {INPE},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {35--52},
type = {Conference Paper},
month = {November},
year = {2006},
isbn = {9 788517 000270},
keywords = {Main Memory Query Engine, Indexing, Spatial Index, Type Hierarchies, Deployable Query Engine},
language = {English},
cr-category = {H.2.4 Database Management Systems,
H.3.1 Content Analysis and Indexing,
H.2.8 Database Applications},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2006-54/INPROC-2006-54.pdf},
contact = {Thomas Schwarz schwarts@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {More and more spatial data is accessible over the web or through portals of
wireless service providers. In this context the main selection criteria for the
data are the type of the requested data objects and their position in the real
world. Integration and performance issues are challenged by the need to process
ad hoc queries in an interactive fashion. In this paper we investigate how a
main memory query engine can be used to meet these requirements. It has the
added benefit of being easily deployable to many components in a large-scale
data integration system. Hence, we analyze how such a query engine can best
exploit the query characteristics by employing an index structure that
leverages spatial and type dimensions.
In order to support query processing in the best possible way we investigate a
specific multi-dimensional main memory index structure. Compared to the
straightforward approach using separate indexes on type and position we can
increase the performance up to almost an order of magnitude in several
important usage scenarios. This requires to tweak the mapping of type IDs to
values in the type dimension, which we discuss extensively. This enables the
overall system to be used interactively, even with large data sets.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2006-54&engl=1}
}
@inproceedings {INPROC-2006-52,
author = {Christoph Mangold and Holger Schwarz and Bernhard Mitschang},
title = {{u38: A Framework for Database-Supported Enterprise Document-Retrieval}},
booktitle = {Proceedings of the Tenth International Database Engineering \& Applications Symposium (IDEAS2006), Delhi, India, December 11-14, 2006},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--8},
type = {Conference Paper},
month = {December},
year = {2006},
language = {English},
cr-category = {H.3.3 Information Search and Retrieval},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In enterprises, information is encoded in documents and databases. Logically,
the information in both worlds is tightly connected, however, on the system
level there is usually a large gap. In this paper, we propose a framework
that improves document retrieval by exploiting available enterprise
databases. In particular, we use database information to model the context of
documents and incorporate this context in our search framework. We present
our framework architecture, its components and its major interfaces. The
framework can be configured and enhanced at well-defined points and, hence, can
easily be customized to other domains. We furthermore evaluate its core
components. Our experiments show that the context-aware approach significantly
improves the quality of search results.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2006-52&engl=1}
}
@inproceedings {INPROC-2006-49,
author = {Mih{\'a}ly Jakob and Holger Schwarz and Fabian Kaiser and Bernhard Mitschang},
title = {{Towards an operation model for generated web applications}},
booktitle = {Workshop proceedings of the sixth international conference on Web engineering (MDWE 2006); Palo Alto, California, USA, July 2006},
editor = {Association for Computing Machinery (ACM)},
address = {New York},
publisher = {ACM Press New York, NY, USA},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {July},
year = {2006},
language = {English},
cr-category = {D.2.3 Software Engineering Coding Tools and Techniques,
D.2.11 Software Engineering Software Architectures,
H.4 Information Systems Applications,
H.5.4 Hypertext/Hypermedia},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {This paper describes a new approach for the development of data-intensive web
applications that depend on non-trivial data manipulation. E-Commerce web
sites, on-line auction systems and large enterprise web portals fall into this
category as they require comprehensive data access, data processing and data
manipulation capabilities. However, existing methodologies mainly concentrate
on modeling content, navigation and presentation aspects of read-only web
sites. Approaches that consider modeling data operations incorporate them into
existing models resulting in a less clear design. We argue that existing models
are not sufficient to express complex operations that access or modify web
application content. Therefore, we propose an additional Operation Model
defining operations for data-intensive web applications. We also propose the
utilization of a web application generator to create an Operation Layer based
on this Operation Model.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2006-49&engl=1}
}
@inproceedings {INPROC-2006-48,
author = {Mih{\'a}ly Jakob and Holger Schwarz and Fabian Kaiser and Bernhard Mitschang},
title = {{Modeling and Generating Application Logic for Data-Intensive Web Applications}},
booktitle = {Proceedings of the 6th international conference on Web engineering (ICWE2006); Palo Alto, California, USA, July 2006},
editor = {Association for Computing Machinery (ACM)},
address = {New York},
publisher = {ACM Press New York, NY, USA},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {77--84},
type = {Conference Paper},
month = {July},
year = {2006},
language = {English},
cr-category = {D.2.3 Software Engineering Coding Tools and Techniques,
D.2.11 Software Engineering Software Architectures,
H.4 Information Systems Applications,
H.5.4 Hypertext/Hypermedia},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {This paper presents a new approach for the development of data-intensive web
applications that depend on sophisticated application logic. E-Commerce web
sites, on-line auction systems and large enterprise web portals fall into this
category as they require comprehensive data access, data processing and data
manipulation capabilities. However, existing methodologies mainly concentrate
on modeling content, navigation and presentation aspects of read-only web
sites. In our opinion these models are not sufficient to express complex
operations that access or modify web application content. Therefore, we propose
an additional Operation Model defining the application logic of a web
application. We show that based on this model a significant part of a web
application's Operation Layer can be generated, still allowing the manual
implementation of arbitrary additional functionality. We evaluate our approach
and present experimental results based on a large example application for the
area of innovation management.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2006-48&engl=1}
}
@inproceedings {INPROC-2006-47,
author = {Matthias Wieland and Daniela Nicklas},
title = {{Ein Framework f{\"u}r kontextbezogene Anwendungen in der Nexus-Plattform}},
booktitle = {3. GI/ITG KuVS Fachgespr{\"a}ch: Ortsbezogene Anwendungen und Dienste},
editor = {Institut f{\"u}r Informatik der Freien Universit{\"a}t Berlin},
address = {Berlin},
publisher = {Freie Universit{\"a}t Berlin, Universit{\"a}tsbibliothek},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {32--35},
type = {Conference Paper},
month = {September},
year = {2006},
isbn = {3-929619-39-3},
keywords = {Umgebungsmodell; Framework; Kontext; Anwendung; Nexus},
language = {German},
cr-category = {H.4 Information Systems Applications},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2006-47/INPROC-2006-47.pdf},
contact = {Senden Sie eine E-Mail an matthias.wieland(at)iaas.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Architecture of Application Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Im Rahmen des Nexus-Projekts wurden zahlreiche kontextbezogene Anwendungen
entwickelt. Aus diesen Erfahrungen heraus wurde ein Framework erarbeitet, das
die Entwicklung orts- und kontextbezogener Anwendungen durch geeignete
Abstraktionen und Laufzeitbibliotheken unterst{\"u}tzt. Dieser Beitrag beschreibt
den Aufbau und die Vorteile dieses Frameworks. Zum Schluss geht der Beitrag
noch auf die zuk{\"u}nftige Entwicklung des Nexus-Projekts im Bereich
Anwendungsunterst{\"u}tzung ein.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2006-47&engl=1}
}
@inproceedings {INPROC-2006-32,
author = {Ralf Rantzau and Christoph Mangold},
title = {{Laws for Rewriting Queries Containing Division Operators}},
booktitle = {Proceedings of the 22nd International Conference on Data Engineering, ICDE 2006},
editor = {Ling Liu and Andreas Reuter and Kyu-Young Whang and Jianjun Zhang},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {21--21},
type = {Conference Paper},
month = {April},
year = {2006},
language = {English},
cr-category = {F.4.1 Mathematical Logic,
H.2.3 Database Management Languages},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2006-32/INPROC-2006-32.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Relational division, also known as small divide, is a derived operator of the
relational algebra that realizes a many-to-one set containment test, where a
set is represented as a group of tuples: Small divide discovers which sets in a
dividend relation contain all elements of the set stored in a divisor relation.
The great divide operator extends small divide by realizing many-to-many set
containment tests. It is also similar to the set containment join operator for
schemas that are not in first normal form.
Neither small nor great divide has been implemented in commercial relational
database systems although the operators solve important problems and many
efficient algorithms for them exist. We present algebraic laws that allow
rewriting expressions containing small or great divide, illustrate their
importance for query optimization, and discuss the use of great divide for
frequent itemset discovery, an important data mining primitive.
A recent theoretic result shows that small divide must be implemented by
special purpose algorithms and not be simulated by pure relational algebra
expressions to achieve efficiency. Consequently, an efficient implementation
requires that the optimizer treats small divide as a first-class operator and
possesses powerful algebraic laws for query rewriting.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2006-32&engl=1}
}
@inproceedings {INPROC-2006-14,
author = {Fabian Kaiser and Holger Schwarz and Mih{\'a}ly Jakob},
title = {{Finding Experts on the Web}},
booktitle = {Proceedings of the Second International Conference on Web Information Systems and Technologies, Set{\'u}bal, Portugal, April 11-13, 2006},
editor = {Jos{\'e} Cordeiro and Vitor Pedrosa and Bruno Encarnacao and Joaquim Filipe},
publisher = {INSTICC},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {363--368},
type = {Conference Paper},
month = {April},
year = {2006},
language = {English},
cr-category = {H.3.3 Information Search and Retrieval},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In this paper, we present an integrated approach on finding experts for
arbitrary user defined topics on the World Wide Web. We discuss the special
challenges that come along with this issue and why solely applying standard
techniques and standard tools like Web search engines is not suitable. We point
out the necessity for a dedicated expert search engine, based on a Focused
Crawler. The main contribution of our work is an approach to integrate standard
Web search engines into the process of searching for experts to utilize the
search engines' knowledge about content and structure of the Web.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2006-14&engl=1}
}
@inproceedings {INPROC-2006-09,
author = {Andre Blessing and Stefan Klatt and Daniela Nicklas and Steffen Volz and Hinrich Sch{\"u}tze},
title = {{Language-Derived Information and Context Models}},
booktitle = {Proceedings of 3rd IEEE PerCom Workshop on Context Modeling and Reasoning (CoMoRea) (at 4th IEEE International Conference on Pervasive Computing and Communication (PerCom'06))},
publisher = {IEEE},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
type = {Conference Paper},
month = {January},
year = {2006},
keywords = {nlp nexus context modeling and reasoning},
language = {English},
cr-category = {H.2.1 Database Management Logical Design},
ee = {http://www.nexus.uni-stuttgart.de},
contact = {Andre Blessing Andre.Blessing@ims.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
Universit{\"a}t Stuttgart, Institut f{\"u}r Photogrammetrie (ifp)},
abstract = {There are a number of possible sources for information about the environment
when creating or updating a context model, including sensorial input,
databases, and explicit modeling by the system designer. Another source is
natural language, either in the form of electronic text (e.g., the world wide
web) or speech. In this paper, we investigate the implications for context
models when some of their information is derived linguistically with an
emphasis on the issues of hybrid models and mapping between entities in
language and context model. We present a prototype that tests some of our
ideas.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2006-09&engl=1}
}
@inproceedings {INPROC-2006-06,
author = {Matthias Wieland and Frank Leymann and Lamine Jendoubi and Daniela Nicklas and Frank D{\"u}rr},
title = {{Task-orientierte Anwendungen in einer Smart Factory}},
booktitle = {Mobile Informationssysteme - Potentiale, Hindernisse, Einsatz. Proceedings MMS'06},
editor = {Thomas Kirste and Birgitta K{\"o}nig-Ries and Key Pousttchi and Klaus Turowski},
address = {Bonn},
publisher = {Gesellschaft f{\"u}r Informatik},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
series = {Lecture Notes in Informatics (LNI)},
volume = {P-76},
pages = {139--143},
type = {Conference Paper},
month = {February},
year = {2006},
isbn = {3-88579-170-6},
keywords = {Kontextbezogene Applikationen; Smart Factory; kontextbezogene Workflows},
language = {German},
cr-category = {H.4.2 Information Systems Applications Types of Systems},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2006-06/INPROC-2006-06.pdf,
http://www.nexus.uni-stuttgart.de/},
contact = {Matthias Wieland wieland@informatik.uni-stuttgart.de, Daniela Nicklas nicklas@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
Universit{\"a}t Stuttgart, Institut f{\"u}r Industrielle Fertigung und Fabrikbetrieb (IFF);
University of Stuttgart, Institute of Architecture of Application Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
abstract = {In diesem Beitrag wird aufgezeigt, welche M{\"o}glichkeiten sich durch den Einsatz
task-orientierter, explorativer Anwendungen im Umfeld einer Smart Factory
ergeben. Nach einer kurzen Charakterisierung dieser Anwendungsklasse wird ein
Szenario geschildert, das Wartungsaufgaben in einer Fabrik mit diesen Konzepten
l{\"o}st. Daraus ergibt sich die Vision einer Smart Factory, in der die
Gesch{\"a}ftsprozesse {\"u}ber ein Kontextmodell mit den technischen Prozessen der
Produktionsstra{\ss}e gekoppelt werden.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2006-06&engl=1}
}
@inproceedings {INPROC-2005-84,
author = {Uwe Heinkel and Carmen Constantinescu and Bernhard Mitschang},
title = {{Integrating Data Changes with Data from Data Service Providers}},
booktitle = {Proceedings of the 18th International Conference on Computer Applications in Industry and Engineering (CAINE 2005)},
publisher = {ICSA},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {146--151},
type = {Conference Paper},
month = {November},
year = {2005},
language = {English},
cr-category = {H.2.5 Heterogeneous Databases},
contact = {Uwe.Heinkel@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In these days, enterprises are more and more confronted to fast changing and
turbulent markets. In order to remain competitive, they have to quickly adapt
themselves to these new situations. The integration solutions employed in such
an environment enable the required agility, by using a loose and flexible
integration architecture. We propose an integration solution based on the
concept of propagating data changes from one information system to the affected
information systems. This paper focuses on the question: how data from other
data services can be accessed and exploited to enhance a data change
propagation system. These services are based on a service-oriented architecture
(SOA) using XML technologies (e.g. SOAP). We developed the Data Service
Description Language (DSDL) to define the data structures of the data service.
This description is used to create the above mentioned Transformation Scripts
as well as to generate requests for accessing remote data. A layered system
approach is introduced to facilitate both the architecture and its
implementation.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-84&engl=1}
}
@inproceedings {INPROC-2005-63,
author = {Joos-Hendrik B{\"o}se and Stefan B{\"o}ttcher and Le Gruenwald and Evaggelia Pitoura and Peter Reiher and George Samaras and Thomas Schwarz and Can T{\"u}rker},
title = {{04441 Working Group - Research Issues in Mobile Transactions}},
booktitle = {Mobile Information Management},
editor = {Margaret H. Dunham and Birgitta K{\"o}nig-Ries and Pitoura Evaggelia and Peter Reiher and Can T{\"u}rker},
address = {Schloss Dagstuhl, Germany},
publisher = {Internationales Begegnungs- und Forschungszentrum (IBFI)},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
series = {Dagstuhl Seminar Proceedings},
volume = {04441},
type = {Conference Paper},
month = {January},
year = {2005},
keywords = {Transactions; mobile clients; multi-hop wireless networks},
language = {English},
cr-category = {H.2.4 Database Management Systems},
ee = {http://drops.dagstuhl.de/opus/volltexte/2005/168/},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {This document discusses three scenarios for databases with mobile clients,
summarizes typical applications and requirements for each of the three
scenarios, and outlines the open research issues which should be solved within
each of the three scenarios. While the first scenario consists of mobile
clients that are connect to a wired network, the second scenario consists of a
network of mobile clients with a single-hop distance to each other but without
a wired network, and the third scenario considers a network of mobile clients
some of which are in multi-hop distance.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-63&engl=1}
}
@inproceedings {INPROC-2005-62,
author = {Susanne Boll and Martin Breunig and Nigel Davies and Christian S. Jensen and Birgitta K{\"o}nig-Ries and Rainer Malaka and Florian Matthes and Christoforo Panayiotou and Simonas Saltenis and Thomas Schwarz},
title = {{04441 Working Group - Towards a Handbook for User-Centred Mobile Application Design}},
booktitle = {Mobile Information Management},
editor = {Margaret H. Dunham and Birgitta K{\"o}nig-Ries and Evaggelia Pitoura and Peter Reiher and Can T{\"u}rker},
address = {Schloss Dagstuhl, Germany},
publisher = {Internationales Begegnungs- und Forschungszentrum (IBFI)},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
series = {Dagstuhl Seminar Proceedings},
volume = {04441},
type = {Conference Paper},
month = {January},
year = {2005},
keywords = {User-Centred Mobile Application Design},
language = {English},
cr-category = {D.2.10 Software Engineering Design},
ee = {http://drops.dagstuhl.de/opus/volltexte/2005/166},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Why do we have difficulties designing mobile apps? Is there a ``Mobile RUP''?},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-62&engl=1}
}
@inproceedings {INPROC-2005-58,
author = {Daniela Nicklas and Susanne B{\"u}rklen and Tobias Drosdol and Nicola H{\"o}nle},
title = {{Benefits of Context Models in Smart Environments}},
booktitle = {2. GI/ITG KuVS Fachgespr{\"a}ch Ortsbezogene Anwendungen und Dienste},
editor = {J{\"o}rg Roth},
address = {Hagen},
publisher = {Fernuniversit{\"a}t Hagen},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
series = {Informatik Bericht},
volume = {324},
pages = {12--15},
type = {Conference Paper},
month = {June},
year = {2005},
keywords = {context models; smart environment; smart room; Nexus},
language = {English},
cr-category = {H.4 Information Systems Applications},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2005-58/INPROC-2005-58.pdf,
http://dreamteam.fernuni-hagen.de/fg_lbs/meeting_2/meeting_2.html,
http://www.nexus.uni-stuttgart.de},
contact = {daniela.nicklas@informatik.uni-stuttgart.de susanne.buerklen@informatik.uni-stuttgart.de tobias.drosdol@informatik.uni-stuttgart.de nicola.hoenle@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
abstract = {Ongoing technologic advances drive the emergence of smart items, everyday
objects with embedded microcomputers and communication capabilities. In a smart
environment a multitude of such smart items exist to assist its users. In this
paper, we will show how smart environments can benefit from the concepts of the
Nexus platform, an open pervasive computing system that supports various kinds
of context-aware applications by providing a federated, potentially global
context model.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-58&engl=1}
}
@inproceedings {INPROC-2005-57,
author = {Christoph Mangold and Holger Schwarz and Bernhard Mitschang},
title = {{Improving Intranet Search Engines Using Context Information from Databases}},
booktitle = {Proceedings of the 14th ACM International Conference on Information and Knowledge Management (CIKM 2005), Bremen, Germany, October 31 - November 5, 2005},
editor = {A. Chowdhury and N. Fuhr and M. Ronthaler and H.-J. Schek and W. Teiken},
publisher = {ACM Press},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {349--350},
type = {Conference Paper},
month = {October},
year = {2005},
isbn = {1-59593-140-6},
language = {English},
cr-category = {H.3.3 Information Search and Retrieval},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2005-57/INPROC-2005-57.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Information in enterprises comes in documents and databases. From a semantic
viewpoint, both kinds of information are usually tightly connected. In this
paper, we propose to enhance common search-engines with contextual information
retrieved from databases. We establish system requirements and anecdotally
demonstrate how documents and database information can be represented as the
nodes of a graph. Then, we give an example how we exploit this graph
information for document retrieval.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-57&engl=1}
}
@inproceedings {INPROC-2005-36,
author = {Mih{\'a}ly Jakob and Fabian Kaiser and Holger Schwarz},
title = {{SEMAFOR: A Framework for an Extensible Scenario Management System}},
booktitle = {Proc. of the IEEE International Engineering Management Conference (IEMC) 2005; St. John's, Newfoundland, September 11-14, 2005},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--5},
type = {Conference Paper},
month = {September},
year = {2005},
language = {English},
cr-category = {H.3 Information Storage and Retrieval},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The development of successful new products and services in highly dynamic
business environments has become an extremely difficult task. Innovation
managers have to utilize a considerable amount of enterprise-internal and
enterprise-external information to judge the potential of new products,
services and technologies.
Scenario management is a powerful instrument to face this problem. Scenarios
represent an intuitive concept to comprehend complex present-day and future
situations. The scenario technique is a method for building such scenarios.
Assessments of future business environments relying on structured scenarios
enable innovation managers to target lucrative market segments and to select
promising product ideas. However, diverse enterprise-internal and
enterprise-external resources have to be utilized to assist the scenario
development process. Last but not least, existing methods are often extremely
time-consuming and existing tools for scenario development fail to provide a
comprehensive solution as they are limited to certain steps in fixed workflows.
In this paper, we propose a modular web-based framework for the flexible and
efficient development and handling of scenarios. Key aspects of our framework
are module-based enterprise-specific workflows, the integration of distributed
human resources into the scenario development process and diverse interfaces to
enterprise-internal and enterprise-external information sources. The framework
is based on self-contained software modules that cover important steps of the
scenario management process. This modularity allows the easy supplementation of
existing 'scenario technique' methods with newly developed methods that are
incorporated into modules and can be combined in a flexible way to fit
enterprise-specific requirements.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-36&engl=1}
}
@inproceedings {INPROC-2005-33,
author = {Carmen Constantinescu and Uwe Heinkel and Jan Le Blond and Stephan Schreiber and Bernhard Mitschang and Engelbert Westk{\"a}mper},
title = {{Flexible Integration of Layout Planning and Adaptive Assembly Systems in Digital Enterprises}},
booktitle = {Proceedings of the 38th CIRP International Seminar on Manufacturing Systems (CIRP ISMS)},
publisher = {CIRP},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {10--18},
type = {Conference Paper},
month = {May},
year = {2005},
language = {English},
cr-category = {J.6 Computer-Aided Engineering,
C.2.4 Distributed Systems},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2005-33/INPROC-2005-33.pdf},
contact = {Uwe.Heinkel@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The agility and adaptability of an enterprise becomes more and more a desired
feature and a key factor of success. Especially manufacturing enterprises have
to be able to respond quickly to both external and internal changes. This
includes the ability to plan the optimal factory configuration and the assembly
processes with little effort in short time. The tools of the Digital Factory
support manufacturing enterprises shortening planning time and increasing
planning quality. As a motivation of our work in the field of Enterprise
Application Integration, a scenario is described, which reveals how a
transformable manufacturing enterprise reacts to market changes with the
support of digital tools. Motivated by our scenario we developed an integration
solution called Stuttgart Integration Platform. A main role in our approach
plays a solution for the Digital Factory, which stores all needed information
about the planned facility layouts and assembly processes. The paper presents
our central integration solution Champagne, and three integrated systems: the
Digital Factory Solution, the Factory Planning Table and the Assembly
Configuration Tool.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-33&engl=1}
}
@inproceedings {INPROC-2005-32,
author = {Stefan Berger and Christoph Mangold and Sebastian Meyer and Engelbert Westk{\"a}mper},
title = {{Knowledge Management in Assembly}},
booktitle = {Proceedings of the 38th CIRP international seminar on manufacturing systems},
publisher = {CIRP},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
type = {Conference Paper},
month = {May},
year = {2005},
language = {English},
cr-category = {H.3.3 Information Search and Retrieval},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Turbulent business environments force enterprises to ever faster answers and
adaptations in order to secure their competitive ability. Especially mass
customization can be seen as one driver for variable and even smaller number of
pieces till lot size 1. Its impacts can be clearly noticed e.g. in assembly
where today changeover and set up processes define the daily business. In
combination with ever more complex products and the operation of facilities in
technological threshold the (re-)use of workers knowledge and experience get a
key factor for mutability and responsibility. The article will show starting
points for the handling of knowledge in assembly which have been researched
within the SFB467-project 'Knowledge based management for the versatile
assembly' at the University of Stuttgart.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-32&engl=1}
}
@inproceedings {INPROC-2005-17,
author = {Rodrigo Salvador Monteiro and Geraldo Zimbrao and Holger Schwarz and Bernhard Mitschang and Jano Moreira De Souza},
title = {{Building the Data Warehouse of Frequent Itemsets in the DWFIST Approach}},
booktitle = {Proceedings of the 15th International Symposium on Methodologies for Intelligent Systems Saratoga Springs, New York - May 25-28, 2005},
publisher = {Springer},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--9},
type = {Conference Paper},
month = {May},
year = {2005},
isbn = {3-540-25878-7},
language = {English},
cr-category = {H.2.7 Database Administration,
H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Some data mining tasks can produce such great amounts of data that we have to
cope with a new knowledge management problem. Frequent itemset mining fits in
this category. Different approaches were proposed to handle or avoid somehow
this problem. All of them have problems and limitations. In particular, most of
them need the original data during the analysis phase, which is not feasible
for data streams. The DWFIST (Data Warehouse of Frequent ItemSets Tactics)
approach aims at providing a powerful environment for the analysis of itemsets
and derived patterns, such as association rules, without accessing the original
data during the analysis phase. This approach is based on a Data Warehouse of
Frequent Itemsets. It provides frequent itemsets in a flexible and efficient
way as well as a standardized logical view upon which analytical tools can be
developed. This paper presents how such a data warehouse can be built.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-17&engl=1}
}
@inproceedings {INPROC-2005-112,
author = {Marko Vrhovnik},
title = {{Update-Propagation in gestaffelten und verteilten Caches}},
booktitle = {Beitragsband zum Studierenden-Programm bei der 11. Fachtagung ``Datenbanken f{\"u}r Business, Technologie und Web'', GI Fachbereich Datenbanken und Informationssysteme, Karlsruhe, 1. M{\"a}rz 2005},
publisher = {Universit{\"a}t Magdeburg, Fakult{\"a}t f{\"u}r Informatik},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {43--45},
type = {Conference Paper},
month = {March},
year = {2005},
language = {German},
cr-category = {C.2.4 Distributed Systems,
H.2.4 Database Management Systems,
H.3.3 Information Search and Retrieval},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2005-112/INPROC-2005-112.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In der Forschungsgruppe NEXUS der Universit{\"a}t Stuttgart wird eine offene
Plattform f{\"u}r ortsbezogene Anwendungen entwickelt. Durch die Offenheit k{\"o}nnen
beliebige Datenanbieter ihre Informationen durch die NEXUS-Plattform
bereitstellen. Eine F{\"o}derations-Middleware verbirgt vor einer Anwendung die
Verteilung der Daten und kombiniert Daten verschiedener Anbieter in geeigneter
Weise. Zur Beschleunigung des Datenzugriffs werden in der NEXUS-Plattform
Zwischenspeicher (Caches) eingesetzt, u.a. in der F{\"o}derations-Middleware, als
auch in mobilen Endger{\"a}ten, auf denen NEXUS-Anwendungen typischerweise
ausgef{\"u}hrt werden. Durch eine solche Staffelung und Verteilung von Caches
k{\"o}nnen Daten ``n{\"a}her'' an eine Anwendung positioniert und entsprechend schneller
geliefert werden. Um die Konsistenz zwischengespeicherter Daten sicherstellen
zu k{\"o}nnen, m{\"u}ssen Aktualisierungen auch in den Caches vollzogen werden. In
dieser Diplomarbeit wurde der L{\"o}sungsraum zur Propagation von Aktualisierungen
(Updates) zu den jeweiligen Caches durchleuchtet. Dabei wurden verschiedene
Cache-Konsistenzsemantiken erstellt und Strategien entwickelt, wie diese im
NEXUS-System umgesetzt werden k{\"o}nnen. Ferner wurde untersucht, welche
Auswirkungen die einzelnen L{\"o}sungsans{\"a}tze auf die Autonomie der einzelnen
Komponenten der NEXUS-Plattform haben.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-112&engl=1}
}
@inproceedings {INPROC-2005-111,
author = {Albert Maier and Bernhard Mitschang and Frank Leymann and Dan Wolfson},
title = {{On Combining Business Process Integration and ETL Technologies}},
booktitle = {Datenbanksysteme in Business, Technologie und Web (BTW'05)},
editor = {Gesellschaft f{\"u}r Informatik},
publisher = {K{\"o}llen},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {533--546},
type = {Conference Paper},
month = {March},
year = {2005},
isbn = {3-88579-394-6},
language = {English},
cr-category = {H.2.8 Database Applications,
H.3.3 Information Search and Retrieval},
ee = {http://btw2005.aifb.uni-karlsruhe.de/},
department = {University of Stuttgart, Institute of Architecture of Application Systems, Architecture of Application Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {On Combining Business Process Integration and ETL Technologies},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-111&engl=1}
}
@inproceedings {INPROC-2005-110,
author = {Cataldo Mega and Frank Wagner and Bernhard Mitschang},
title = {{From Content Management to Enterprise Content Management}},
booktitle = {Datenbanksysteme in Business, Technologie und Web},
editor = {Gesellschaft f{\"u}r Informatik},
publisher = {K{\"o}llen},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {596--613},
type = {Conference Paper},
month = {March},
year = {2005},
isbn = {3-88579-394-6},
language = {English},
cr-category = {H.2.8 Database Applications,
H.3.3 Information Search and Retrieval},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In this paper we will provide a step-by-step description on what it means to
evolve the architecture of a traditional content management system into an
Enterprise Content Management (ECMS) system, explain the differences of both
systems and motivate that this transformation is necessary. By analyzing
business scenarios in the realm of different content management domains, we
will explain why today's content management systems struggle when it comes to
satisfy the need for performance, scalability and business resilience. Using
the system design of IBM DB2 Content Manager as reference point we will outline
and discuss some of the new key technical challenges found when promoting
ondemand ECM services and look at their affordability. By detailing a few
representative use cases we will perform a problem analysis, and an attempt
will be made to present an enhanced ECM system design that makes use of a
component `virtualization' approach in order to allow for a dynamic services
infrastructure to be setup and which capitalizes on proven peer-peer and grid
technology.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-110&engl=1}
}
@inproceedings {INPROC-2005-06,
author = {Mih{\'a}ly Jakob and Matthias Grossmann and Daniela Nicklas and Bernhard Mitschang},
title = {{DCbot: Finding Spatial Information on the Web}},
booktitle = {Proceedings of the 10th International Conference on Database Systems for Advanced Applications (DASFAA 2005)},
address = {Beijing},
publisher = {Springer},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
type = {Conference Paper},
month = {April},
year = {2005},
language = {English},
cr-category = {H.2.8 Database Applications,
H.3.3 Information Search and Retrieval,
H.5.4 Hypertext/Hypermedia},
ee = {http://www.nexus.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The WWW provides an overwhelming amount of information, which spatially indexed
can be a valuable additional data source for location- based applications. By
manually building a spatial index, only a fraction of the available resources
can be covered. This paper introduces a system for the automatic mapping of web
pages to geographical locations. Our web robot uses several sets of domain
specific keywords, lexical context rules, that are automatically learned, and a
hierarchical catalogue of geographical locations that provides exact
geographical coordinates for locations. Spatially indexed web pages are used to
construct Geographical Web Portals, which can be accessed by different
location-based applications. In addition, we present experimental results
demonstrating the quantity and the quality of automatically indexed web pages.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-06&engl=1}
}
@inproceedings {INPROC-2005-05,
author = {Uwe-Philipp K{\"a}ppeler and Georg Kindermann and Daniela Nicklas and Nicola H{\"o}nle and Dominique Dudkowski},
title = {{Shared Dynamic Context Models: Benefits for Advanced Sensor Data Fusion for Autonomous Robots}},
booktitle = {Proceedings of Artificial Intelligence and Applications 2005; Innsbruck, Austria, February, 14-16, 2005},
publisher = {IASTED},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
type = {Conference Paper},
month = {February},
year = {2005},
keywords = {Autonomous; Robots; Sensor; Fusion; Nexus; Context; Sensorfusion},
language = {English},
cr-category = {H.3.3 Information Search and Retrieval,
I.2.9 Robotics},
contact = {kaeppeler@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Image Understanding;
University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Information provided by a shared dynamic context model offers new possibilities
in the realm of autonomous robots. The availability of external context
information can be used by a robot to extend and to validate the locally
acquired knowledge about its dynamic environment. A lot of helpful context
information is already available in digital form and its quantity will increase
rapidly, according to the vision of ubiquitous computing.
In this paper we present the Nexus Platform capable of managing a global
dynamic context model that can be addressed and accessed in an easy and uniform
way by all kinds of context-aware applications (like robots). Advantages for an
autonomous wheelchair robot using the Nexus Platform are depicted within an
airport scenario. We further present a framework for a sensor fusion agent,
able to perform multi-sensor data fusion with selective attention control,
concerning the current situation of the environment.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-05&engl=1}
}
@inproceedings {INPROC-2005-04,
author = {Nicola H{\"o}nle and Uwe-Philipp K{\"a}ppeler and Daniela Nicklas and Thomas Schwarz},
title = {{Benefits Of Integrating Meta Data Into A Context Model}},
booktitle = {Proceedings of 2nd IEEE PerCom Workshop on Context Modeling and Reasoning (CoMoRea) (at 3rd IEEE International Conference on Pervasive Computing and Communication (PerCom'05)); Hawaii, March 12, 2005},
publisher = {IEEE},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
type = {Conference Paper},
month = {March},
year = {2005},
keywords = {meta data; context model; context-aware applications},
language = {English},
cr-category = {H.2.1 Database Management Logical Design},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Image Understanding},
abstract = {Meta data---data about data---improves the value of the operational data by giving
applications and users additional information on the data's origin, its
precision or staleness. We outline the benefits of modeling meta data in
context models: it can be used for resource finding, enhanced data selection,
trust and data quality issues and sensor fusion. We show how meta data included
into an object-based context model influences the data modeling and the
selection process in the query language. Finally, we describe our
implementation of the presented functionality in the Nexus platform.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-04&engl=1}
}
@inproceedings {INPROC-2005-03,
author = {Matthias Grossmann and Martin Bauer and Nicola H{\"o}nle and Uwe-Philipp K{\"a}ppeler and Daniela Nicklas and Thomas Schwarz},
title = {{Efficiently Managing Context Information for Large-scale Scenarios}},
booktitle = {Proceedings of the 3rd IEEE Conference on Pervasive Computing and Communications: PerCom2005; Kauai Island, Hawaii, March 8-12, 2005},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
type = {Conference Paper},
month = {March},
year = {2005},
language = {English},
cr-category = {H.3.0 Information Storage and Retrieval General,
H.3.4 Information Storage and Retrieval Systems and Software},
ee = {http://www.nexus.uni-stuttgart.de},
contact = {matthias.grossmann@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Image Understanding;
University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
abstract = {In this paper, we address the data management aspect of large-scale pervasive
computing systems. We aim at building an infrastructure that simultaneously
supports many kinds of context-aware applications, ranging from room level up
to nation level. This allembracing approach gives rise to synergetic benefits
like data reuse and sensor sharing. We identify major classes of context data
and detail on their characteristics relevant for efficiently managing large
amounts of it. Based on that, we argue that for large-scale systems it is
beneficial to have special-purpose servers that are optimized for managing a
certain class of context data. In the Nexus project we have implemented five
servers for different classes of context data and a very flexible federation
middleware integrating all these servers. For each of them, we highlight in
which way the requirements of the targeted class of data are tackled and
discuss our experiences.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-03&engl=1}
}
@inproceedings {INPROC-2005-02,
author = {Mih{\'a}ly Jakob and Matthias Grossmann and Nicola H{\"o}nle and Daniela Nicklas},
title = {{DCbot: Exploring the Web as Value-added Service for Location-based Applications}},
booktitle = {Proceedings of the 21st International Conference on Data Engineering, ICDE 2005, April 5-8, 2005, Tokyo, Japan},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
type = {Conference Paper},
month = {April},
year = {2005},
language = {English},
cr-category = {H.2.8 Database Applications,
H.3.1 Content Analysis and Indexing,
H.3.3 Information Search and Retrieval},
ee = {http://www.nexus.uni-stuttgart.de},
contact = {mihaly.jakob@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Location-based applications need information that can be spatially accessed.
Typically, this data is exspecially gathered and preprocessed in a particular
way (e.g. in a spatial data base). Up to now most of these applications ignore
an existing large information space, the World Wide Web. Web pages can be
mapped to locations and then accessed by location-aware applications with
spatial predicates. We want to automate the process of mapping web pages to
locations because of the huge amount of data available in the WWW. Our web
robot DCbot analyses web pages using pre-defined rules and spatial knowledge
and maps them to locations. In the demonstration we will show some location
mapping results of DCbot and, in addition, we will let DCbot scan locally
stored web pages at demonstration time, so that effects of changes of the
content can be shown.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2005-02&engl=1}
}
@inproceedings {INPROC-2004-44,
author = {Carmen Constantinescu and Sergey Kornienko and Olga Kornienko and Uwe Heinkel},
title = {{An agent-based approach to support the scalability of change propagation}},
booktitle = {Proceedings of the ISCA 17th International Conference on PARALLEL AND DISTRIBUTED COMPUTING SYSTEMS},
editor = {D.A. Bader and A. A. Khokhar},
publisher = {ISCA},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {157--164},
type = {Conference Paper},
month = {September},
year = {2004},
isbn = {1-880843-52-8},
language = {English},
cr-category = {I.2.11 Distributed Artificial Intelligence,
H.3.4 Information Storage and Retrieval Systems and Software},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Image Understanding},
abstract = {In this paper, we address aspects of the challenge facing the Data Integration
solutions in the problem of increasing the scalability. We first overview our
results in data integration and present our prototype. Searching for a
motivation to employ the agent-based technology and envision how this
technology can be applied to improve the system scalability represent the next
step of our research. For that, we examine some formal definitions and metrics
of scalability which fit the purpose of system description. The analysis of our
data integration solution, a change propagation system called Champagne,
identifies the system components having the most influence on the scalability
problem. We propose to employ the agent-based technology to improve the
scalability and perform some experiments which reveal our work.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2004-44&engl=1}
}
@inproceedings {INPROC-2004-42,
author = {Thomas Schwarz and Markus Iofcea and Matthias Grossmann and Nicola H{\"o}nle and Daniela Nicklas and Bernhard Mitschang},
title = {{On Efficiently Processing Nearest Neighbor Queries in a Loosely Coupled Set of Data Sources}},
booktitle = {Proceedings of the 12th ACM International Symposium on Advances in Geographic Information Systems (ACM GIS 2004), Washington D.C., November 12-13, 2004},
editor = {ACM},
publisher = {ACM},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
type = {Conference Paper},
month = {November},
year = {2004},
keywords = {Data integration, distributed query processing, federated database system, kNN, nearest neighbors, parallel query processing},
language = {English},
cr-category = {H.2.4 Database Management Systems,
H.2.8 Database Applications,
H.3.3 Information Search and Retrieval},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2004-42/INPROC-2004-42.pdf},
contact = {thomas.schwarz@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {We propose a family of algorithms for processing nearest neighbor (NN) queries
in an integration middleware that provides federated access to numerous loosely
coupled, autonomous data sources connected through the internet. Previous
approaches for parallel and distributed NN queries considered all data sources
as relevant, or determined the relevant ones in a single step by exploiting
additional knowledge on object counts per data source. We propose a different
approach that does not require such detailed statistics about the distribution
of the data. It iteratively enlarges and shrinks the set of relevant data
sources. Our experiments show that this yields considerable performance
benefits with regard to both response time and effort. Additionally, we propose
to use only moderate parallelism instead of querying all relevant data sources
at the same time. This allows us to trade a slightly increased response time
for a lot less effort, hence maximizing the cost profit ratio, as we show in
our experiments. Thus, the proposed algorithms clearly extend the set of NN
algorithms known so far.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2004-42&engl=1}
}
% NOTE(review): language was {German}, but title, keywords, and abstract are in
% English (GeoInfo 2004, Brazil) -- changed to {English}; confirm against the paper.
@inproceedings {INPROC-2004-41,
author = {Daniela Nicklas and Nicola H{\"o}nle and Michael Moltenbrey and Bernhard Mitschang},
title = {{Design and Implementation Issues for Explorative Location-based Applications: the NexusRallye}},
booktitle = {Proceedings for the VI Brazilian Symposium on GeoInformatics: GeoInfo 2004; November 22-24, 2004},
editor = {Gilberto Camara Cirano Iochpe},
address = {Sao Jose dos Campos},
publisher = {INPE},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {167--181},
type = {Conference Paper},
month = {November},
year = {2004},
isbn = {3-901882-20-0},
keywords = {location-based services, context-awareness, mobile applications},
language = {English},
cr-category = {H.2.8 Database Applications,
H.5.3 Group and Organization Interfaces},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2004-41/INPROC-2004-41.pdf,
http://www.nexus.uni-stuttgart.de},
contact = {danickla@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Explorative Location-based Applications (eLBA) define a new class of
applications that rely on both positioning (i.e. location information) and
georeferenced information in addition to a flexible and efficient system
infrastructure that supports a mobile and ubiquitous usage. In this paper we
define first a modeling framework to design eLBAs that builds on the concept of
tasks as a very valuable system/user interaction and application structuring
concept. In addition, we report on a system framework, the Nexus platform, that
efficiently provides access to georeferenced information and positioning
information. Our sample application, the NexusRallye, is used to exemplify
important aspects of our solution platform and to show its advantages as
compared to other approaches.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2004-41&engl=1}
}
% INPROC-2004-40: IIWeb-2004 workshop paper (VLDB 2004 workshop, Toronto, August 2004);
% full text mirrored at the ftp/ee URLs listed below.
@inproceedings {INPROC-2004-40,
author = {Thomas Schwarz and Nicola H{\"o}nle and Matthias Grossmann and Daniela Nicklas and Bernhard Mitschang},
title = {{Efficient Domain-Specific Information Integration in Nexus}},
booktitle = {Proceedings of the 2004 VLDB Workshop on Information Integration on the Web : IIWeb-2004 ; Toronto, Canada, August 30, 2004},
editor = {Hasan Davulcu and Nick Kushmerick},
publisher = {online},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {122--127},
type = {Conference Paper},
month = {August},
year = {2004},
keywords = {Nexus; information integration; domain-specific},
language = {English},
cr-category = {H.3.4 Information Storage and Retrieval Systems and Software,
H.3.3 Information Search and Retrieval},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2004-40/INPROC-2004-40.pdf,
http://cips.eas.asu.edu/iiwebfinalproceedings/52.pdf,
http://cips.eas.asu.edu/iiweb-proceedings.html},
contact = {thomas.schwarz@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In this paper, we present the Nexus approach to efficient domain-specific
integration of many loosely coupled data sources. A so called information
maximizing mediation middleware (IMMM) has to cope with large data volumes and
many queries, and at the same time achieve a tight semantic integration for the
data instances. For efficiency and practicability reasons, we propose to use an
extensible global schema and a limited domain-specific query language. This
facilitates employing domain-specific semantic knowledge in the middleware:
detect duplicates, merge multiple representations, aggregate and generalize
information. Finally, we present a working prototype tailored to the domain of
context-aware applications.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2004-40&engl=1}
}
% NOTE(review): fixed publisher typo "Southhampton" -> "Southampton"; the stored
% isbn {854328130} had only 9 digits -- restored the missing leading zero
% (0-85432-813-0 is checksum-valid as ISBN-10, verify against the proceedings);
% language was {German} but title and abstract are English -> {English}.
@inproceedings {INPROC-2004-38,
author = {Christian Becker and Daniela Nicklas},
title = {{Where do spatial context-models end and where do ontologies start? A proposal of a combined approach}},
booktitle = {Proceedings of the First International Workshop on Advanced Context Modelling, Reasoning and Management in conjunction with UbiComp 2004},
editor = {Jadwiga Indulska and David De Roure},
address = {Nottingham, England},
publisher = {University of Southampton},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {48--53},
type = {Conference Paper},
month = {September},
year = {2004},
isbn = {0-85432-813-0},
keywords = {Context-Models, Adaptation, Infrastructures for Context-Aware Computing, Ubiquitous Computing},
language = {English},
cr-category = {H.2.1 Database Management Logical Design},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2004-38/INPROC-2004-38.pdf},
contact = {christian.becker@informatik.uni-stuttgart.de, daniela.nicklas@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Context-aware applications adapt their behavior depending on the state of the
physical world along with other information representing context. This requires
context management, i.e., the efficient management of context information and
feasible context representations in order to allow reasoning. This paper
discusses two common approaches, spatial context models and contextual
ontologies, and argues for a combined approach providing the efficiency of
context management through context models combined with the semantic
expressiveness of ontologies.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2004-38&engl=1}
}
% NOTE(review): language was {German}, but this is a VLDB 2004 demo paper with an
% English title and abstract -- changed to {English}.
@inproceedings {INPROC-2004-30,
author = {Tobias Kraft and Holger Schwarz},
title = {{CHICAGO: A Test and Evaluation Environment for Coarse-Grained Optimization}},
booktitle = {Proceedings of the 30th International Conference on Very Large Databases, Toronto, Canada, August 29th - September 3rd, 2004},
publisher = {Morgan Kaufmann},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1345--1348},
type = {Conference Paper},
month = {August},
year = {2004},
isbn = {0-12-088469-0},
language = {English},
cr-category = {H.2.4 Database Management Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Relational OLAP tools and other database applications generate sequences of SQL
statements that are sent to the database server as result of a single
information request issued by a user. Coarse-Grained Optimization is a
practical approach for the optimization of such statement sequences based on
rewrite rules. In this demonstration we present the CHICAGO test and evaluation
environment that allows to assess the effectiveness of rewrite rules and
control strategies. It includes a lightweight heuristic optimizer that modifies
a given statement sequence using a small and variable set of rewrite rules.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2004-30&engl=1}
}
% NOTE(review): contact previously held the German sentence "Senden Sie eine
% E-Mail an frank.duerr@..."; normalized to the bare address, matching the
% contact format of the sibling entries in this file.
@inproceedings {INPROC-2004-29,
author = {Frank D{\"u}rr and Nicola H{\"o}nle and Daniela Nicklas and Christian Becker and Kurt Rothermel},
title = {{Nexus--A Platform for Context-Aware Applications}},
booktitle = {1. GI/ITG Fachgespr{\"a}ch Ortsbezogene Anwendungen und Dienste.},
editor = {J{\"o}rg Roth},
address = {Hagen},
publisher = {FernUniversit{\"a}t in Hagen},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
series = {Informatik-Berichte der FernUniversit{\"a}t in Hagen},
volume = {317},
pages = {15--18},
type = {Conference Paper},
month = {June},
year = {2004},
keywords = {Location-based Service; Context-aware System; Platform; Geocast},
language = {English},
cr-category = {C.2.4 Distributed Systems,
H.2.4 Database Management Systems,
H.2.8 Database Applications,
H.3.4 Information Storage and Retrieval Systems and Software,
C.2.2 Network Protocols},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2004-29/INPROC-2004-29.pdf},
contact = {frank.duerr@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In this paper we present the Nexus Platform for context-aware applications.
This platform allows to share the effort of setting up a global and detailed
context model between different providers by federating their partial models.
Applications can query data directly, use push-based communication through an
event service, or use value-added services like a navigation or map service for
special tasks. Additional context-aware services like hoarding or geocast can
be implemented on basis of the platform. For the latter we present different
approaches for addressing and efficient message forwarding based on the Nexus
Platform.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2004-29&engl=1}
}
% INPROC-2004-24: INFORMATIK 2004 (GI annual conference, Ulm) paper,
% Lecture Notes in Informatics vol. P-50. No ee/url mirror besides the NCSTRL link.
@inproceedings {INPROC-2004-24,
author = {Tobias Drosdol and Thomas Schwarz and Martin Bauer and Matthias Gro{\ss}mann and Nicola H{\"o}nle and Daniela Nicklas},
title = {{Keeping Track of ``Flying Elephants'': Challenges in Large-Scale Management of Complex Mobile Objects}},
booktitle = {Proceedings of INFORMATIK 2004 - the Thirty-Fourth Annual Conference of the Gesellschaft f{\"u}r Informatik e.V.(GI); Ulm, Germany, September 20-24, 2004. Vol. 1},
editor = {Peter Dadam and Manfred Reichert},
address = {Bonn},
publisher = {K{\"o}llen Druck+Verlag GmbH},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
series = {Lecture Notes in Informatics},
volume = {P-50},
pages = {288--292},
type = {Conference Paper},
month = {September},
year = {2004},
isbn = {3-88579-379-2},
language = {English},
cr-category = {C.2.4 Distributed Systems,
H.2.4 Database Management Systems,
H.2.8 Database Applications,
H.3.4 Information Storage and Retrieval Systems and Software},
contact = {Tobias Drosdol Tobias.Drosdol@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems},
abstract = {The management of mobile objects like cars, persons, or workpieces in a factory
is an important task in many context-aware environments. So far, most solutions
can either cope with many small objects (few properties) or with a limited
number of complex objects in a centralized way. In this paper, we face the
challenge of managing a large number of bulky mobile objects (flying
elephants). We state requirements, propose basic components, and discuss
alternative architectures based on the main influencing factors like mobility
and update rate.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2004-24&engl=1}
}
% NOTE(review): fixed booktitle typo "Pervasice" -> "Pervasive" (PerCom 2004
% workshops proceedings).
@inproceedings {INPROC-2004-19,
author = {Thomas Schwarz and Nicola H{\"o}nle and Matthias Gro{\ss}mann and Daniela Nicklas},
title = {{A Library for Managing Spatial Context Using Arbitrary Coordinate Systems}},
booktitle = {Workshop on Context Modeling and Reasoning : CoMoRea'04 ; In: Workshops-Proceedings of the 2nd IEEE Conference on Pervasive Computing and Communications : PerCom2004 ; Orlando, Florida, March 14-17, 2004},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {48--52},
type = {Conference Paper},
month = {March},
year = {2004},
keywords = {coordinate transformation, basic geometry types, spatial reference system, data integration, library for context-aware applications},
language = {English},
cr-category = {I.3.5 Computational Geometry and Object Modeling,
H.2.8 Database Applications,
J.2 Physical Sciences and Engineering},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2004-19/INPROC-2004-19.pdf,
http://nexus.informatik.uni-stuttgart.de/COMOREA/,
http://www.percom.org/percom_2004/index.htm,
http://www.nexus.uni-stuttgart.de/},
contact = {thomas.schwarz@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Since location is an important part of context, the management of spatial
information is important for many context-aware applications, e.g. the position
or the extent of users, sensors, rooms or buildings. Coordinates always have a
coordinate system (CS) associated to them. Numerous CSs exist and a lot of them
are commonly used, thus conversion becomes a necessity. We introduce a library
that implements the OGC Simple Feature Specification and can dynamically cope
with different CSs, enabling interoperability between applications, middleware
components and data providers. We illustrate functions and features, describe a
common CS determination algorithm and point out our lessons learned: avoid
transformations, use existing standards but dare to extend them when needed.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2004-19&engl=1}
}
% INPROC-2004-02: PerCom 2004 paper; joint work of the Distributed Systems and
% Applications groups (see department field). Reports on integrating the Georgia
% Tech Aware Home spatial service into the Nexus platform.
@inproceedings {INPROC-2004-02,
author = {Othmar Lehmann and Martin Bauer and Christian Becker and Daniela Nicklas},
title = {{From Home to World - Supporting Context-aware Applications through World Models}},
booktitle = {Proceedings of the Second IEEE International Conference on Pervasive Computing and Communications},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {297--306},
type = {Conference Paper},
month = {March},
year = {2004},
keywords = {context; context-awareness; pervasive computing; ubiquitous computing; world model; Nexus},
language = {English},
cr-category = {C.2.4 Distributed Systems,
H.2.1 Database Management Logical Design,
H.3.4 Information Storage and Retrieval Systems and Software},
ee = {http://www.percom.org/,
http://www.nexus.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In the vision of pervasive computing smart everyday objects communicate and
cooperate to provide services and information to users. Interoperability
between devices and applications not only requires common protocols but also
common context management. In this paper we discuss requirements on the context
management based on the Georgia Tech's Aware Home environment and the global
context management perspective of the Nexus project. Our experiences with
integrating the Aware Home Spatial Service into the Nexus platform show how
federation concepts and a common context model can provide applications with
uniform context information in different administrative and application
domains.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2004-02&engl=1}
}
% NOTE(review): removed PDF line-break artifact "re- flected" -> "reflected"
% in the abstract.
@inproceedings {INPROC-2003-40,
author = {Christoph Mangold and Bernhard Mitschang},
title = {{Enabling a Reuse Oriented Engineering Methodology}},
booktitle = {Proceedings of the Second IASTED International Conference on Information and Knowledge Sharing},
editor = {Wesley Chu},
address = {Anaheim, Calgary, Zurich},
publisher = {ACTA Press},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {6--11},
type = {Conference Paper},
month = {November},
year = {2003},
isbn = {0-88986-396-2},
keywords = {Engineering Methodology; Reuse; Representation; Knowledge Engineering and Management},
language = {English},
cr-category = {H.1.1 Systems and Information Theory,
H.1.2 User/Machine Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In the domain of machine engineering the reuse of design data is an important
but complex instrument to improve quality and shorten development time. The
successful reuse of design data requires both, a machine engineering specific
methodology and the support by appropriate information system technology. In
this paper we introduce a machine engineering methodology that is based on
several reuse stages. Our approach to support the methodology with an
appropriate information system uses a simple graph based information model
where reuse stages are reflected as layers. We also show how reuse is
represented in the model.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2003-40&engl=1}
}
% INPROC-2003-27: ICSOC03 forum-session paper, published in the University of
% Trento technical report series DIT-03-056; PDF and PS mirrored on the ftp
% server (see ee field).
@inproceedings {INPROC-2003-27,
author = {Marcello Mariucci and Bernhard Mitschang},
title = {{Extending Web Service Technology towards an Earth Observation Integration Framework}},
booktitle = {Proceedings of the Forum Session at the First International Conference on Service Oriented Computing: ICSOC03; Trento, Italy, Dec. 15-18, 2003},
editor = {Marco Aiello and Chris Bussler and Vincenzo D'Andrea and Jian. Yang},
address = {Trento},
publisher = {University of Trento},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {Technical Report, Information and Communication Technology, University of Trento},
volume = {DIT-03-056},
pages = {117--128},
type = {Conference Paper},
month = {November},
year = {2003},
keywords = {Earth Observation; Framework; Web Services; Repository; Workflow Management},
language = {English},
cr-category = {D.2.2 Software Engineering Design Tools and Techniques,
D.2.6 Software Engineering Programming Environments,
D.2.9 Software Engineering Management,
D.2.10 Software Engineering Design,
D.2.11 Software Engineering Software Architectures,
D.2.13 Software Engineering Reusable Software,
H.3.4 Information Storage and Retrieval Systems and Software,
H.4 Information Systems Applications},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2003-27/INPROC-2003-27.pdf,
ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2003-27/INPROC-2003-27.ps},
contact = {mariucci@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In this paper we describe the implementation of a service-based application
integration solution for the complex domain of Earth Observation (EO)
application systems. The presented approach is based on an EO integration
framework. It supports the concatenation of disparate software applications to
flexible EO process chains. Resulting EO services are provided to end users by
means of Web Service technology. We demonstrate that current standard
technology is not sufficient to dynamically publish and interactively invoke EO
services over the Web. We describe necessary extensions and adaptations.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2003-27&engl=1}
}
% NOTE(review): removed PDF line-break artifacts in the abstract ("sup-porting",
% "proc-essing", "man-aging", "pur-poses"). NOTE(review): isbn {3-85403-170-10}
% has 11 digits and is not a valid ISBN-10 -- left unchanged, verify against the
% printed proceedings (same value appears in INPROC-2003-22).
@inproceedings {INPROC-2003-23,
author = {Marcello Mariucci and Frank Wagner and Gunter Martens and Jens K{\"u}nzl},
title = {{ARSENAL: Model-driven Earth Observation Integration Framework}},
booktitle = {Proceedings of the Fifth International Conference on Information Integration and Web-based Applications \& Services: iiWAS '03, Demonstration Paper; Jakarta, Indonesia, September 15-17, 2003. Vol. 170},
editor = {Gabriele Kotsis and Stephane Bressan and Barbara Catania and Ismail Khalil Ibrahim},
publisher = {Oesterreichische Computer Gesellschaft},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {227--231},
type = {Conference Paper},
month = {September},
year = {2003},
isbn = {3-85403-170-10},
keywords = {Implementation Architecture Prototype, Model-driven Architecture, Process-oriented Application Integration, Model-driven Integration Process, Repository System},
language = {English},
cr-category = {H.3.5 Online Information Services,
J.2 Physical Sciences and Engineering,
D.2.2 Software Engineering Design Tools and Techniques},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2003-23/INPROC-2003-23.pdf},
contact = {For more information please contact mariucci@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {An Earth Observation (EO) integration framework is an application integration
solution for supporting the development and execution of EO services. EO
services are based on the intensive use of large data sets from space. They
process raw EO data sets into increasingly specialized products until a certain
level of quality is achieved. This processing requires the tight cooperation of
several distributed experts, and intensive computation across a coordinated
sequence of both, interactive and automatic processing steps. Appropriate
examples for such EO services are the generation of weather forecasts, forest
fire detection, and oil slick monitoring. EO services can be characterized as
highly flexible structures, which constantly need to be adapted to evolving
spacecraft and processing technologies. We suggest a model-driven EO
integration framework solution that adequately copes with the flexible
development, customization, and execution of reusable EO services. Our
prototype includes a comprehensive integration model that accurately handles
system metadata throughout the software life cycle, and significantly enhances
the EO service development process in terms of quality, reuse, and
adaptability. The prototype employs repository technology for managing model
related issues, as well as workflow and Web service technology for execution
purposes. It is mainly built upon commercial products, which are seamlessly
combined by appropriate 'glue' components.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2003-23&engl=1}
}
% INPROC-2003-22: iiWAS '03 paper (same proceedings volume 170 as INPROC-2003-23).
% NOTE(review): isbn {3-85403-170-10} has 11 digits and is not a valid ISBN-10 --
% verify against the printed proceedings.
@inproceedings {INPROC-2003-22,
author = {Marcello Mariucci and Clemens Dorda and Bernhard Mitschang},
title = {{Design and Implementation of a Model-driven Earth Observation Integration Framework}},
booktitle = {Proceedings of the Fifth International Conference on Information Integration and Web-based Applications \& Services: iiWAS '03; Jakarta, Indonesia, September 15-17, 2003. Vol. 170},
editor = {Gabriele Kotsis and Stephane Bressan and Barbara Catania and Ismail Khalil Ibrahim},
publisher = {Oesterreichische Computer Gesellschaft},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {215--225},
type = {Conference Paper},
month = {September},
year = {2003},
isbn = {3-85403-170-10},
keywords = {Model-driven Architecture, Process-oriented Application Integration, Model-driven Integration Process, Repository System},
language = {English},
cr-category = {H.3.5 Online Information Services,
J.2 Physical Sciences and Engineering,
D.2.2 Software Engineering Design Tools and Techniques},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2003-22/INPROC-2003-22.pdf,
ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2003-22/INPROC-2003-22.ps},
contact = {For more information please contact mariucci@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {An Earth Observation (EO) integration framework is an application integration
solution for supporting the development and execution of EO services. EO
services are based on the intensive use of large data sets from space, and
require the tight cooperation of several distributed experts. They can be
characterized as highly flexible structures, which constantly need to be
adapted to evolving spacecraft and processing technologies. In this paper we
introduce a model-driven approach for an EO integration framework. We describe
a comprehensive integration model that adequately copes with the flexible
development, customization, and execution of EO services. The accurate
treatment of related model instances throughout the software life cycle
significantly enhances the EO service development process in terms of quality,
reuse, and adaptability. We discuss technological aspects for the realization
of such an integration framework, and outline our prototype implementation that
is mainly built upon commercial products. We demonstrate that such a
model-driven approach can be realized by employing repository technology for
managing model related issues, as well as workflow and Web service technology
for execution purposes.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2003-22&engl=1}
}
% INPROC-2003-17: German-language overview paper on SFB 627 (Nexus) at
% Informatik 2003; language = {German} is correct here (title, keywords, and
% abstract are in German).
@inproceedings {INPROC-2003-17,
author = {Kurt Rothermel and Dieter Fritsch and Bernhard Mitschang and Paul J. K{\"u}hn and Martin Bauer and Christian Becker and Christian Hauser and Daniela Nicklas and Steffen Volz},
title = {{SFB 627: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme}},
booktitle = {Proceedings Informatik 2003},
address = {Frankfurt},
publisher = {Gesellschaft f{\"u}r Informatik},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
type = {Conference Paper},
month = {September},
year = {2003},
keywords = {Umgebungsmodelle; kontext-bezogene Systeme; Nexus; Spatial World Models; context-aware systems},
language = {German},
cr-category = {C.2.4 Distributed Systems,
H.2.4 Database Management Systems,
H.2.8 Database Applications,
H.3.4 Information Storage and Retrieval Systems and Software},
ee = {http://www.nexus.uni-stuttgart.de},
contact = {Kurt.Rothermel@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
Universit{\"a}t Stuttgart, Institut f{\"u}r Kommunikationsnetze und Rechnersysteme (IKR);
Universit{\"a}t Stuttgart, Institut f{\"u}r Photogrammetrie (ifp)},
abstract = {Ziel des Sonderforschungsbereichs 627 ``Umgebungsmodelle f{\"u}r mobile
kontextbezogene Systeme'' ist die Erforschung von Methoden und Verfahren f{\"u}r die
Definition, die Verwaltung und die Nutzung von digitalen Umgebungsmodellen.
Existierende Informationsr{\"a}ume werden mit komplexen Modellen der realen Welt
verschr{\"a}nkt und erlauben so neuartige Anwendungen. Insbesondere die Klasse der
ortsbezogenen Anwendungen und aktuelle Forschungsgebiete wie das Ubiquitous
Computing k{\"o}nnen von solchen Umgebungsmodellen profitieren, oder werden durch
sie erst erm{\"o}glicht.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2003-17&engl=1}
}
% NOTE(review): pages was the placeholder {0--1}; DBLP lists this VLDB 2003 demo
% paper at pages 1089-1092 -- TODO confirm against the proceedings before relying
% on this value.
@inproceedings {INPROC-2003-16,
author = {Daniela Nicklas and Matthias Grossmann and Thomas Schwarz},
title = {{NexusScout: An Advanced Location-Based Application On A Distributed, Open Mediation Platform}},
booktitle = {Proceedings of the 29th VLDB Conference, Berlin, Germany, 2003},
publisher = {Morgan Kaufmann Publishers},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {1089--1092},
type = {Conference Paper},
month = {September},
year = {2003},
isbn = {0-12-722442-4},
language = {English},
cr-category = {A.0 General Literature, General},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {This demo shows several advanced use cases of location-based services and
demonstrates how these use cases are facilitated by a mediation middleware for
spatial information, the Nexus Platform. The scenario shows how a mobile user
can access location-based information via so called Virtual Information Towers,
register spatial events, send and receive geographical messages or find her
friends by displaying other mobile users. The platform facilitates these
functions by transparently combining spatial data from a dynamically changing
set of data providers, tracking mobile objects and observing registered spatial
events.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2003-16&engl=1}
}
% INPROC-2003-04: VLDB 2003 paper. NOTE(review): institution ("Faculty of Computer
% Science") and department ("Institute of Parallel and Distributed High-Performance
% Systems") differ from the sibling entries' naming -- presumably the older
% institute name valid in 2003; verify before normalizing.
@inproceedings {INPROC-2003-04,
author = {Tobias Kraft and Holger Schwarz and Ralf Rantzau and Bernhard Mitschang},
title = {{Coarse-Grained Optimization: Techniques for Rewriting SQL Statement Sequences}},
booktitle = {Proceedings of 29th International Conference on Very Large Data Bases (VLDB 2003), Berlin, September 9-12, 2003},
publisher = {Morgan Kaufmann},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {488--499},
type = {Conference Paper},
month = {September},
year = {2003},
isbn = {0-12-722442-4},
keywords = {SQL; Query Optimization; OLAP},
language = {English},
cr-category = {H.2.4 Database Management Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Relational OLAP tools and other database applications generate sequences of SQL
statements that are sent to the database server as result of a single
information request provided by a user. Unfortunately, these sequences cannot
be processed efficiently by current database systems because they typically
optimize and process each statement in isolation. We propose a practical
approach for this optimization problem, called ``coarse-grained optimization,''
complementing the conventional query optimization phase. This new approach
exploits the fact that statements of a sequence are correlated since they
belong to the same information request. A lightweight heuristic optimizer
modifies a given statement sequence using a small set of rewrite rules. Since
the optimizer is part of a separate system layer, it is independent of but can
be tuned to a specific underlying database system. We discuss implementation
details and demonstrate that our approach leads to significant performance
improvements.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2003-04&engl=1}
}
@inproceedings {INPROC-2003-03,
author = {Bernhard Mitschang and Engelbert Westk{\"a}mper and Carmen Constantinescu and Uwe Heinkel and Benno L{\"o}ffler and Ralf Rantzau and Ralph Winkler},
title = {{Divide et Impera: A Flexible Integration of Layout Planning and Logistics Simulation through Data Change Propagation}},
booktitle = {Proceedings of the 36th CIRP International Seminar on Manufacturing Systems (CIRP ISMS 2003), June 03-05, 2003 Saarland University, Saarbr{\"u}cken, Germany},
editor = {C. Weber and H. Bley and G. Hirt},
address = {Saarbr{\"u}cken, Germany},
publisher = {Saarland University},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {411--418},
type = {Conference Paper},
month = {June},
year = {2003},
isbn = {3-930429-58-6},
keywords = {integration of information systems; XML technologies; facility layout planning; logistics simulation},
language = {English},
cr-category = {H.2.5 Heterogeneous Databases,
J.6 Computer-Aided Engineering},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2003-03/INPROC-2003-03.pdf},
contact = {Uwe.Heinkel@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {The turbulent markets lead to new challenges for today's enterprises, they have
to be transformable to stay competitive. Therefore, we developed a new approach
that integrates Logistic Simulation and Layout Planning to fulfil the goal of
improving the production system. Our approach is based on propagation and
transformation of data changes concerning the continuous adaptation tasks among
the Layout Planning and Logistics Simulation systems. Instead of relying on a
tightly integrated global data schema, we connect systems only as far as
required by building ``bridges'' between them. The systems that participate in
the integration are kept autonomous. We use several state-of-the-art XML
technologies in our integration system.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2003-03&engl=1}
}
@inproceedings {INPROC-2003-02,
author = {Ralf Rantzau},
title = {{Processing Frequent Itemset Discovery Queries by Division and Set Containment Join Operators}},
booktitle = {Proceedings of the ACM SIGMOD Workshop on Research Issues in Data Mining and Knowledge Discovery (DMKD), San Diego, California, USA, June 13, 2003},
editor = {Mohammed Zaki and Charu Aggarwal},
publisher = {Rensselaer Polytechnic Institute, Troy, New York 12180-3590, USA},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
series = {Report No. 03-05},
pages = {20--27},
type = {Conference Paper},
month = {June},
year = {2003},
keywords = {association rule discovery; relational division; set containment join},
language = {English},
cr-category = {H.2.4 Database Management Systems,
H.2.8 Database Applications},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2003-02/INPROC-2003-02.pdf,
http://www.cs.rpi.edu/~zaki/DMKD03/},
contact = {rrantzau@acm.org},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {SQL-based data mining algorithms are rarely used in practice today. Most
performance experiments have shown that SQL-based approaches are inferior to
main-memory algorithms. Nevertheless, database vendors try to integrate
analysis functionalities to some extent into their query execution and
optimization components in order to narrow the gap between data and processing.
Such a database support is particularly important when data mining applications
need to analyze very large datasets or when they need access current data, not
a possibly outdated copy of it.
We investigate approaches based on SQL for the problem of finding frequent
itemsets in a transaction table, including an algorithm that we recently
proposed, called Quiver, which employs universal and existential
quantifications. This approach employs a table schema for itemsets that is
similar to the commonly used vertical layout for transactions: each item of an
itemset is stored in a separate row. We argue that expressing the frequent
itemset discovery problem using quantifications offers interesting
opportunities to process such queries using set containment join or set
containment division operators, which are not yet available in commercial
database systems. Initial performance experiments reveal that Quiver cannot be
processed efficiently by commercial DBMS. However, our experiments with query
execution plans that use operators realizing set containment tests suggest that
an efficient processing of Quiver is possible.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2003-02&engl=1}
}
@inproceedings {INPROC-2003-01,
author = {Christoph Mangold and Ralf Rantzau and Bernhard Mitschang},
title = {{F{\"o}deral: Management of Engineering Data Using a Semistructured Data Model}},
booktitle = {Proceedings of the International Conference on Enterprise Information Systems (ICEIS), Angers, France, April 2003},
publisher = {Unknown},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
type = {Conference Paper},
month = {April},
year = {2003},
isbn = {972-98816-1-8},
keywords = {product data management, semistructured data, integration, data modeling},
language = {English},
cr-category = {H.2 Database Management},
contact = {christoph.mangold@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {The F{\"o}deral system is a flexible repository for the management, integration and
modeling of product data. Current systems in this domain employ object-oriented
data models. Whereas this is adequate for the management of product data, it
proves insufficient for integration and modeling. Present semistructured data
models, however, are suited ideally for integration, but data management and
also modeling is a problem. In this paper we describe our approach to narrow
down the gap between structured and semistructured data models. We present the
F{\"o}deral information system which employs a new semistructured data model and
show how this model can be employed in the context of management, integration,
and modeling of engineering data.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2003-01&engl=1}
}
@inproceedings {INPROC-2002-42,
author = {Bernhard Mitschang},
title = {{A Necessity for CSCW in Design - The CHAMPAGNE Approach and Experience (invited)}},
booktitle = {A Necessity for CSCW in Design - The CHAMPAGNE Approach and Experience},
editor = {Uni Rio de Janeiro},
address = {Rio de Janeiro},
publisher = {The Seventh International Conference on Computer Supported Cooperative Work in Design},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {1--2},
type = {Conference Paper},
month = {September},
year = {2002},
language = {English},
cr-category = {H.2.4 Database Management Systems},
contact = {Bernhard Mitschang Bernhard.Mitschang@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Data Propagation: A Necessity for CSCW in Design - The CHAMPAGNE Approach and
Experience (invited), in: The Seventh International Conference on Computer
Supported Cooperative Work in Design, Rio de Janeiro, Brasil, 2002.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2002-42&engl=1}
}
@inproceedings {INPROC-2002-41,
author = {Aiko Frank and Bernhard Mitschang},
title = {{A customizable shared information space to support concurrent design}},
booktitle = {Computer in Industry},
address = {Amsterdam},
publisher = {Elsevier Science Publishers B. V.},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
volume = {48},
pages = {45--57},
type = {Conference Paper},
month = {May},
year = {2002},
language = {English},
cr-category = {H.5.3 Group and Organization Interfaces},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Sharing data is an important aspect in distributed design environments and
should be supported by an underlying system. Any synchronous access to data is
conflict prone. Applying concurrency control and two phase commit are one
option to be considered. But design processes also demand for cooperation
between the designers. Negotiation about actions on the product under design
and the early exchange of preliminary results are crucial issues. Controlled
data access by itself does not fulfil all the needs for cooperation. We will
present a new approach that relies on a concept and system model which
integrates concurrent activities by a joint information space offering flexible
protocols for cooperation on the shared objects. We will describe the
customizability of the protocols to effectively support different cooperative
scenarios.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2002-41&engl=1}
}
@inproceedings {INPROC-2002-18,
author = {Carmen Constantinescu and Uwe Heinkel and Holger Meinecke},
title = {{A Data Change Propagation System for Enterprise Application Integration}},
booktitle = {The 2nd International Conference on Information Systems and Engineering (ISE 2002)},
editor = {Waleed W. Smari and Nordine Melab and Shu-Ching Chen},
address = {San Diego},
publisher = {The Society for Modeling and Simulation International},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {129--134},
type = {Conference Paper},
month = {July},
year = {2002},
isbn = {1-56555-251-2},
keywords = {information systems; integration of heterogeneous data sources; XML technology},
language = {English},
cr-category = {H.2.5 Heterogeneous Databases},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2002-18/INPROC-2002-18.pdf},
contact = {carmen.constantinescu@informatik.uni-stuttgart.de oder uwe.heinkel@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Most enterprises have a diverse environment of heterogeneous and autonomous
information systems. If the same data is relevant for several information
systems, then data changes in one supplier system affect data stored in other
demander systems. The process of exchanging changed data between systems, named
change propagation, is based on dependencies established between these systems.
The management of a single, integrated enterprise information system is often
infeasible or too expensive, due to the autonomy of business units and the
heterogeneity of their IT infrastructures. The solution is to support the
enterprise by a generic approach able to manage data dependencies and to
transform data stored in a source information system into data stored in the
dependent information systems. We propose a loosely coupled system, called
Stuttgart Information and Exploration System. Our prototype mainly consists of
a data dependency specification tool, a propagation engine and a repository
that stores all relevant objects for these components.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2002-18&engl=1}
}
@inproceedings {INPROC-2002-17,
author = {Marcello Mariucci and Bernhard Mitschang},
title = {{On Making RAMSES an Earth Observation Application Framework}},
booktitle = {Proceedings of the 2nd International Conference on Information Systems and Engineering: ISE 2002; San Diego, California, July 14-18, 2002},
editor = {Waleed W. Smari and Nordine Melab and Shu-Ching Chen},
address = {San Diego},
publisher = {The Society for Modeling and Simulation International (SCS)},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
series = {Simulation Series},
volume = {34 (2)},
pages = {67--72},
type = {Conference Paper},
month = {July},
year = {2002},
isbn = {1-56555-251-2},
keywords = {Frameworks for Information Technologies, Software Architectures for Information Systems, Component-Based Designs, Service-Based Approaches},
language = {English},
cr-category = {H.4.m Information Systems Applications Miscellaneous},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2002-17/INPROC-2002-17.pdf},
contact = {For further information, please send an email to mariucci@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {RAMSES is one of the first large-scale prototypes of an operational Earth
Observation (EO) application system. It implements a complex infrastructure for
the extensive support of a thematic EO application system, which focuses on the
detection and monitoring of oil spills. Since EO application systems are
usually built on top of a set of generic functions, this paper analyses and
assesses the RAMSES infrastructure in order to form a generic EO application
framework. This framework should mainly support the collaborative development
and customization of emerging EO application systems by maximizing the use of
already existing system facilities. Furthermore, it should support the flexible
extension and rapid reconfiguration of workflows as the business changes.
Results of our analyses show that the RAMSES infrastructure does not cover all
requirements of an EO application framework. We therefore introduce advanced
design concepts and propose a new framework architecture that structurally
controls the inherent complexity of the interdisciplinary domain of EO
application systems.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2002-17&engl=1}
}
@inproceedings {INPROC-2002-12,
author = {Ralf Rantzau and Carmen Constantinescu and Uwe Heinkel and Holger Meinecke},
title = {{Champagne: Data Change Propagation for Heterogeneous Information Systems}},
booktitle = {Proceedings of the International Conference on Very Large Databases (VLDB); Demonstration Paper; Hong Kong, August 20-23, 2002},
publisher = {Morgan Kaufmann},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
type = {Conference Paper},
month = {August},
year = {2002},
keywords = {data transformation; data integration; schema mapping},
language = {English},
cr-category = {H.2.5 Heterogeneous Databases},
contact = {rrantzau@acm.org},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Flexible methods supporting the data interchange between autonomous information
systems are important for today's increasingly heterogeneous enterprise IT
infrastructures. Updates, insertions, and deletions of data objects in
autonomous information systems often have to trigger data changes in other
autonomous systems, even if the distributed systems are not integrated into a
global schema. We suggest a solution to this problem based on the propagation
and transformation of data using several XML technologies. Our prototype
manages dependencies between the schemas of distributed data sources and allows
to define and process arbitrary actions on changed data by manipulating all
dependent data sources. The prototype comprises a propagation engine that
interprets scripts based on a workflow specification language, a data
dependency specification tool, a system administration tool, and a repository
that stores all relevant information for these tools.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2002-12&engl=1}
}
@inproceedings {INPROC-2002-05,
author = {Ralf Rantzau},
title = {{Frequent Itemset Discovery with SQL Using Universal Quantification}},
booktitle = {Proceedings of the International Workshop on Database Technologies for Data Mining (DTDM); Prague, Czech Republic, March 2002},
address = {Prague, Czech Republic},
publisher = {unknown},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {51--66},
type = {Conference Paper},
month = {March},
year = {2002},
keywords = {data mining; association rules; relational division; mining and database integration},
language = {English},
cr-category = {H.2.4 Database Management Systems,
H.2.8 Database Applications},
contact = {rrantzau@acm.org},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Algorithms for finding frequent itemsets fall into two broad classes: (1)
algorithms that are based on non-trivial SQL statements to query and update a
database, and (2) algorithms that employ sophisticated in-memory data
structures, where the data is stored into and retrieved from flat files. Most
performance experiments have shown that SQL-based approaches are inferior to
main-memory algorithms. However, the current trend of database vendors to
integrate analysis functionalities into their query execution and optimization
components, i.e., ``closer to the data,'' suggests revisiting these results and
searching for new, potentially better solutions.
We investigate approaches based on SQL-92 and present a new approach called
Quiver that employs universal and existential quantifications. This approach
uses a table layout for itemsets, where a group of multiple records represents
a single itemset. Hence, our vertical layout is similar to the popular layout
used for the transaction table, which is the input of frequent itemset
discovery. Our approach is particularly beneficial if the database system in
use provides adequate strategies and techniques for processing universally
quantified queries, unlike current commercial systems.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2002-05&engl=1}
}
@inproceedings {INPROC-2002-03,
author = {Carmen Constantinescu and Uwe Heinkel and Ralf Rantzau and Bernhard Mitschang},
title = {{A System for Data Change Propagation in Heterogeneous Information Systems}},
booktitle = {Proceedings of the International Conference on Enterprise Information Systems (ICEIS), Volume I, Ciudad Real, Spain, April 2002},
publisher = {ICEIS Press/Escola Superior de Technologia de Setubal, Portugal},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {73--80},
type = {Conference Paper},
month = {April},
year = {2002},
keywords = {enterprise application integration; manufacturing; repository; propagation},
language = {English},
cr-category = {H.3.4 Information Storage and Retrieval Systems and Software},
ee = {http://www.iceis.org},
contact = {carmen.constantinescu@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Today, it is common that enterprises manage several mostly heterogeneous
information systems to supply their production and business processes with
data. There is a need to exchange data between the information systems while
preserving system autonomy. Hence, an integration approach that relies on a
single global enterprise data schema is ruled out. This is also due to the
widespread usage of legacy systems. We propose a system, called Propagation
Manager, which manages dependencies between data objects stored in different
information systems. A script specifying complex data transformations and other
sophisticated activities, like the execution of external programs, is
associated with each dependency. For example, an object update in a source
system can trigger data transformations of the given source data for each
destination system that depends on the object. Our system is implemented using
current XML technologies. We present the architecture and processing model of
our system and demonstrate the benefit of our approach by illustrating an
extensive example scenario.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2002-03&engl=1}
}
@inproceedings {INPROC-2002-01,
author = {Ralf Rantzau and Leonard Shapiro and Bernhard Mitschang and Quan Wang},
title = {{Universal Quantification in Relational Databases: A Classification of Data and Algorithms}},
booktitle = {Proceedings of the International Conference on Extending Database Technology (EDBT), Prague, Czech Republic, March 2002},
publisher = {Springer},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
series = {Lecture Notes in Computer Science},
volume = {2287},
pages = {445--463},
type = {Conference Paper},
month = {March},
year = {2002},
isbn = {3-540-43324-4},
keywords = {query processing; relational division; physical operators},
language = {English},
cr-category = {H.2.4 Database Management Systems},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2002-01/INPROC-2002-01.ps,
http://www.springer.de/comp/lncs/index.html},
contact = {rrantzau@acm.org},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Queries containing universal quantification are used in many applications,
including business intelligence applications. Several algorithms have been
proposed to implement universal quantification efficiently. These algorithms
are presented in an isolated manner in the research literature - typically, no
relationships are shown between them. Furthermore, each of these algorithms
claims to be superior to others, but in fact each algorithm has optimal
performance only for certain types of input data. In this paper, we present a
comprehensive survey of the structure and performance of algorithms for
universal quantification. We introduce a framework for classifying all possible
kinds of input data for universal quantification. Then we go on to identify the
most efficient algorithm for each such class. One of the input data classes has
not been covered so far. For this class, we propose several new algorithms. For
the first time, we are able to identify the optimal algorithm to use for any
given input dataset. These two classifications of input data and optimal
algorithms are important for query optimization. They allow a query optimizer
to make the best selection when optimizing at intermediate steps for the
quantification problem.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2002-01&engl=1}
}
@inproceedings {INPROC-2001-70,
author = {Albrecht Messner and Bernhard Mitschang},
title = {{Leistungsbewertung f{\"u}r Application Server Technologie: ein parametrisierbarer Benchmark-Ansatz am Beispiel des Brokat Twister Application Servers.}},
booktitle = {GI Jahrestagung (2) 2001. Bd. 2},
editor = {Gesellschaft f{\"u}r Informatik e.V.},
address = {Wien},
publisher = {Gesellschaft f{\"u}r Informatik e.V.},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {909--915},
type = {Conference Paper},
month = {September},
year = {2001},
isbn = {3-85403-157-2},
language = {German},
cr-category = {H.2.2 Database Management Physical Design},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {E-Commerce: Systemunterst{\"u}tzung f{\"u}r den Umgang mit Daten und Prozessen in
vernetzten Anwendungsumgebungen. Leistungsbewertung f{\"u}r Application Server
Technologie: ein parametrisierbarer Benchmark-Ansatz am Beispiel des Brokat
Twister Application Servers. 909-915},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-70&engl=1}
}
@inproceedings {INPROC-2001-44,
author = {Daniela Nicklas and Christoph Pfisterer and Bernhard Mitschang},
title = {{Towards Location-based Games}},
booktitle = {Proceedings of the International Conference on Applications and Development of Computer Games in the 21st Century: ADCOG 21; Hongkong Special Administrative Region, China, November 22-23 2001},
editor = {Alfred Loo Wai Sing and Wan Hak Man and Wong Wai and Cyril Tse Ning},
address = {Hong Kong},
publisher = {Division of Computer Studies, City University of Hong Kong, Hong Kong SAR, China},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {61--67},
type = {Conference Paper},
month = {November},
year = {2001},
isbn = {924-442-199-4},
keywords = {Nexus; Location-based Services; Games; Augmented World Model; Nexus Applications},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
H.3.5 Online Information Services,
K.8 Personal Computing},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2001-44/INPROC-2001-44.pdf,
ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2001-44/INPROC-2001-44.ps,
http://www.nexus.uni-stuttgart.de},
contact = {Daniela Nicklas danickla@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {In this paper we investigate the basic properties of location-based games. This
new type of game is made possible by the recent advances of mobile computing
hardware and infrastructure. Players act not by pressing buttons or moving
pawns on a board, but by moving around themselves in the real world. We present
a simple classification of location-based games, and show how these games can
be designed and implemented. With some adaptations, game concepts from existing
board and computer games can be mapped to make location-based games more
interesting and fun to play. Our methods are demonstrated with three actual
game examples. Further, common infrastructure requirements are deduced and we
show how the open platform developed by the neXus working group fulfills them.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-44&engl=1}
}
@inproceedings {INPROC-2001-43,
author = {Daniela Nicklas and Bernhard Mitschang},
title = {{The Nexus Augmented World Model: An Extensible Approach for Mobile, Spatially-Aware Applications}},
booktitle = {Proceedings of the 7th International Conference on Object-Oriented Information Systems : OOIS '01 ; Calgary, Canada, August 27-29, 2001},
editor = {Yingxu Wang and Shushma Patel and Ronald Johnston},
address = {London},
publisher = {Springer-Verlag},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
pages = {392--401},
type = {Conference Paper},
month = {August},
year = {2001},
isbn = {1-85233-546-7},
keywords = {Nexus; location-based services; location-aware; augmented world model},
language = {English},
cr-category = {H.2.1 Database Management Logical Design,
H.2.8 Database Applications,
H.3.5 Online Information Services},
ee = {http://www.nexus.uni-stuttgart.de/},
contact = {Daniela Nicklas daniela.nicklas@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {How should the World Wide Web look like if it were for location-based
information? And how would mobile, spatially aware applications deal with such
a platform? In this paper we present the neXus Augmented World Model, an object
oriented data model which plays a major role in an open framework for both
providers of location-based information and new kinds of applications: the
neXus platform. We illustrate the usability of the model with several sample
applications and show the extensibility of this framework. At last we present a
stepwise approach for building spatially aware applications in this
environment.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-43&engl=1}
}
@inproceedings {INPROC-2001-42,
author = {Michael Kessler and Gundolf Kiefer and Jens Leenstra and Knut Sch{\"u}nemann and Thomas Schwarz and Hans-Joachim Wunderlich},
title = {{Using a Hierarchical DfT Methodology in High Frequency Processor Designs for Improved Delay Fault Testability}},
booktitle = {Proceedings of the International Test Conference : ITC 2001 ; Baltimore, Maryland, October 30-November 1, 2001},
publisher = {IEEE Computer Society Press},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {461--469},
type = {Conference Paper},
month = {October},
year = {2001},
isbn = {0-7803-7169-0},
keywords = {hierarchical; DfT; BIST; testability; scan chain reordering},
language = {English},
cr-category = {B.8.1 Reliability, Testing, and Fault-Tolerance,
C.1 Processor Architectures,
C.4 Performance of Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Computer Science, Computer Architecture},
abstract = {In this paper a novel hierarchical DfT methodology is presented which is
targeted to improve the delay fault testability for external testing and
scan-based BIST. After the partitioning of the design into high frequency
macros, the analysis for delay fault testability already starts in parallel
with the implementation at the macro level. A specification is generated for
each macro that defines the delay fault testing characteristics at the macro
boundaries. This specification is used to analyse and improve the delay fault
testability by improving the scan chain ordering at macro-level before the
macros are connected together into the total chip network. The hierarchical
methodology has been evaluated with the instruction window buffer core of an
out-of-order processor. It was shown that for this design practically no extra
hardware is required.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-42&engl=1}
}
@inproceedings {INPROC-2001-41,
author = {Daniela Nicklas and Matthias Gro{\ss}mann and Thomas Schwarz and Steffen Volz and Bernhard Mitschang},
title = {{A Model-Based, Open Architecture for Mobile, Spatially Aware Applications}},
booktitle = {Proceedings of the 7th International Symposium on Spatial and Temporal Databases: SSTD 2001; Redondo Beach, CA, USA, July 12-15, 2001},
editor = {Christian S. Jensen and Markus Schneider and Bernhard Seeger and Vassilis J. Tsotras},
address = {Berlin, Heidelberg, New York},
publisher = {Springer-Verlag},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
series = {Lecture Notes in Computer Science},
volume = {2121},
pages = {117--135},
type = {Conference Paper},
month = {July},
year = {2001},
isbn = {3-540-42301-X},
keywords = {Mobile Computing; Location-Aware Applications; Augmented World Model},
language = {English},
cr-category = {H.2.1 Database Management Logical Design,
H.3.4 Information Storage and Retrieval Systems and Software,
H.3.5 Online Information Services},
ee = {http://www.nexus.uni-stuttgart.de},
contact = {daniela.nicklas@informatik.uni-stuttgart.de, matthias.grossmann@informatik.uni-stuttgart.de, thomas.schwarz@informatik.uni-stuttgart.de, steffen.volz@ifp.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems;
Universit{\"a}t Stuttgart, Institut f{\"u}r Photogrammetrie (ifp)},
abstract = {With the emerging availability of small and portable devices that are able to
determine their position and to communicate wirelessly, mobile and spatially
aware applications become feasible. These applications rely on information that
is bound to locations. In this paper we present Nexus, a platform for such
applications, which is open for both new applications and new information
providers, similar to the World Wide Web. Distributed servers provide
location-based information, which is federated to an integrated view for the
applications. To achieve this goal, we present the concept of the Augmented
World Model, which is a common data model for location-based information. We
give an example to show how applications can use this platform.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-41&engl=1}
}
@inproceedings{INPROC-2001-40,
  author      = {Jochen R{\"u}tschlin and G{\"u}nter Sauter and J{\"u}rgen Sellentin and Klaudia Hergula and Bernhard Mitschang},
  title       = {{Komponenten-Middleware: Der n{\"a}chste Schritt zur Interoperabilit{\"a}t von IT-Systemen}},
  booktitle   = {Tagungsband der 9. GI-Fachtagung ``Datenbanksysteme in B{\"u}ro, Technik und Wissenschaft'' (BTW 2001), 7.-9. M{\"a}rz 2001, Oldenburg},
  editor      = {Andreas Heuer and Frank Leymann and Denny Priebe},
  address     = {Berlin, Heidelberg, New York},
  publisher   = {Springer-Verlag},
  institution = {University of Stuttgart, Faculty of Computer Science, Germany},
  pages       = {322--331},
  type        = {Conference Paper},
  month       = {March},
  year        = {2001},
  isbn        = {3-540-41707-9},
  language    = {German},
  cr-category = {H.3.4 Information Storage and Retrieval Systems and Software,
                 H.3.5 Online Information Services},
  ee          = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2001-40/INPROC-2001-40.pdf},
  department  = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
  abstract    = {In diesem Papier stellen wir eine erste Konzeption f{\"u}r eine komponentenbasierte
                 Middleware vor. Dabei verwenden wir neutrale Daten- und Beschreibungsmodelle,
                 um eine Abstraktion bzgl. bestehender Komponentenmodelle zu erlangen.
                 Kernpunkte in unserer Architektur sind die Komponentenschnittstellen, das auf
                 SOAP basierende Kommunikationsprotokoll und ein Corporate Repository.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-40&engl=1}
}
@inproceedings {INPROC-2001-39,
author = {Jochen R{\"u}tschlin and J{\"u}rgen Sellentin and Bernhard Mitschang},
title = {{Industrieller Einsatz von Application Server Technologie}},
booktitle = {Informatik 2001: Wirtschaft und Wissenschaft in der Network Economy -- Visionen und Wirklichkeit. Tagungsband der GI/OCG-Jahrestagung, 25.-28. September 2001, Universit{\"a}t Wien.},
editor = {Kurt Bauknecht and Wilfried Brauer and Thomas M{\"u}ck},
publisher = {{\"O}sterreichische Computer Gesellschaft},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {916--921},
type = {Conference Paper},
month = {September},
year = {2001},
isbn = {3-85403-157-2},
keywords = {Enterprise Application Integration, EAI, Integrationsarchitektur, Middleware, Application Server, J2EE},
language = {German},
cr-category = {C.2.4 Distributed Systems,
D.2.11 Software Engineering Software Architectures,
D.2.12 Software Engineering Interoperability,
H.4.m Information Systems Applications Miscellaneous},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {In diesem Beitrag wollen wir anhand einer Integrationsarchitektur aus dem
EAI-Umfeld motivieren und aufzeigen, wie Application Server Technologie
sinnvoll bei der Zusammenf{\"u}hrung von Systemen in einer vernetzten Umgebung
eingesetzt werden kann. Dazu stellen wir erst unsere bisherige
Integrationsarchitektur vor und erl{\"a}utern an dieser einige Nachteile des
traditionellen Vorgehens. Ein Abschnitt {\"u}ber Application Server und die
J2EE-Bestrebungen leiten {\"u}ber zu einem Neuvorschlag der
Integrationsarchitektur, realisiert auf Basis eben dieser Application Server
Technologie.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-39&engl=1}
}
@inproceedings {INPROC-2001-38,
author = {Jochen R{\"u}tschlin},
title = {{Ein Portal - Was ist das eigentlich?}},
booktitle = {Informatik 2001: Wirtschaft und Wissenschaft in der Network Economy -- Visionen und Wirklichkeit. Tagungsband der GI/OCG-Jahrestagung, 25.-28. September 2001, Universit{\"a}t Wien.},
editor = {Kurt Bauknecht and Wilfried Brauer and Thomas M{\"u}ck},
publisher = {{\"O}sterreichische Computer Gesellschaft},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {691--696},
type = {Conference Paper},
month = {September},
year = {2001},
isbn = {3-85403-157-2},
keywords = {Portal, Portale},
language = {German},
cr-category = {H.5.2 Information Interfaces and Presentation User Interfaces},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2001-38/INPROC-2001-38.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {In diesem Beitrag soll der Versuch unternommen werden, den vagen Begriff eines
Portals genauer zu spezifizieren. Dazu werden Intention und technische
Bestandteile eines Portals er{\"o}rtert. Hilfreich dabei ist der Blick auf
bestehende Systeme, die unter dem Begriff Portal laufen, um durch
Generalisierung und Klassifikation einer Definition n{\"a}her zu kommen. In diesem
Zusammenhang werden auch einige Ergebnisse aus einer in unserem Hause
durchgef{\"u}hrten Evaluierung von drei Portal-Systemen vorgestellt. Als Ausblick
werden einige Anforderungen angesprochen, die mit der heutigen
Portal-Technologie gar nicht oder nur sehr schwer bzw. umst{\"a}ndlich zu
realisieren sind.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-38&engl=1}
}
@inproceedings {INPROC-2001-37,
author = {Carmen Constantinescu and Uwe Heinkel and Ralf Rantzau and Bernhard Mitschang},
title = {{SIES - An Approach for a Federated Information System in Manufacturing}},
booktitle = {Proceedings of the International Symposium on Information Systems and Engineering (ISE); Las Vegas, Nevada, USA, June 2001},
publisher = {CSREA Press},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {269--275},
type = {Conference Paper},
month = {June},
year = {2001},
isbn = {1-892512-85-8},
keywords = {enterprise application integration; manufacturing; federation; repository; propagation},
language = {English},
cr-category = {H.3.4 Information Storage and Retrieval Systems and Software},
contact = {carmen.constantinescu@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Many problems encountered in providing enterprise-wide information are related
to the integration of databases and systems that have been independently
developed and also to the management of changes and transformations of data
from one database (or system) into another. A major requirement is to
accommodate heterogeneity and at the same time to preserve the autonomy of the
components. This paper presents our approach to a repository-driven federated
system based on a propagation mechanism. The Stuttgart Information and
Exploration System (SIES) is characterized by its main components: the
Federation Manager, the Propagation Manager and the Repository System.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-37&engl=1}
}
@inproceedings{INPROC-2001-34,
  author      = {Aiko Frank},
  title       = {{Agent Protocols for Integration and Cooperation in a Design Application Framework}},
  booktitle   = {Proceedings of the 2nd International Workshop on Software Agents and Workflow for Systems Interoperability, London, ON, Canada, July 14th, 2001},
  editor      = {Zakaria Maamar and Weiming Shen and Hamada H. Ghenniwa},
  address     = {London, Canada},
  publisher   = {NRC Research Council Canada},
  institution = {University of Stuttgart, Faculty of Computer Science, Germany},
  pages       = {31--38},
  type        = {Conference Paper},
  month       = {July},
  year        = {2001},
  isbn        = {0-660-18552-0},
  keywords    = {Designflow; CSCW; data sharing; agent protocols},
  language    = {English},
  cr-category = {H.4.1 Office Automation,
                 H.5.3 Group and Organization Interfaces,
                 J.6 Computer-Aided Engineering},
  ee          = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2001-34/INPROC-2001-34.pdf},
  contact     = {frankao@informatik.uni-stuttgart.de},
  department  = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Design is a discipline, which requires support for the combination of different
                 systems and tools, spontaneous interaction between designers, sharing of
                 (design) data, and adaptability to altering conditions. When looking at design
                 processes it is a demanding task to coordinate and organize the work of the
                 different team members. The work of a single person not only depends upon the
                 work of others, but can also influence the work of other designers as well.
                 Correspondingly, patterns of cooperation are to be established to minimize or
                 resolve resulting conflicts, support the concurrent use of data, and to
                 coordinate the work process. In order to fulfill those requirements the ASCEND
                 Designflow Model and its prototype CASSY offer a corresponding object model, an
                 architecture combining workflow and groupware systems, and finally a protocol
                 integration layer based on agent communication. This protocol layer helps to
                 flexibly combine the functionality of the different systems and services, since
                 it can be adapted to different application scenarios. In this paper, we will
                 describe the concepts of the protocols and their implementation architecture,
                 resulting in a generic protocol engine.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-34&engl=1}
}
@inproceedings {INPROC-2001-33,
author = {Aiko Frank and Bernhard Mitschang},
title = {{On Sharing of Objects in Concurrent Design}},
booktitle = {Proceedings of the 6th International Conference on CSCW in Design (CSCWID), London, ON, Canada, July, 2001},
editor = {Weiming Shen and Zongkai Lin and Jean-Paul Barth{\`e}s and Mohamed Kamel},
address = {Ottawa, Canada},
publisher = {NRC Research Press},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {71--76},
type = {Conference Paper},
month = {July},
year = {2001},
isbn = {0-660-18493-1},
keywords = {Designflow; CSCW; data sharing; agent protocols},
language = {English},
cr-category = {H.4.1 Office Automation,
H.5.3 Group and Organization Interfaces,
J.6 Computer-Aided Engineering},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2001-33/INPROC-2001-33.pdf},
contact = {frankao@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Sharing data is an important aspect in distributed design environments and
should be supported by an underlying system. Any synchronous access to data is
conflict prone. Applying concurrency control and two phase commit are an option
to be considered. But design processes also demand cooperation between the
designers. Negotiation about actions on the product under design and the early
exchange of preliminary results are crucial issues. Controlled data access by
itself doesn't fulfill all the needs for cooperation. We will present a new
approach that relies on a concept and system model which integrates concurrent
activities by a common information space offering flexible protocols for
cooperation on the shared objects. We will describe the customizability of the
protocols to allow the approach to be adapted to different cooperative
scenarios.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-33&engl=1}
}
@inproceedings{INPROC-2001-32,
  author      = {Holger Schwarz and Ralf Wagner and Bernhard Mitschang},
  title       = {{Improving the Processing of Decision Support Queries: The Case for a DSS Optimizer}},
  booktitle   = {Proc. of the 2001 International Database Engineering \& Applications Symposium (IDEAS), July 16-18, 2001},
  editor      = {Michel Adiba and Christine Collet and Bipin C. Desai},
  address     = {Los Alamitos, Washington, Brussels, Tokyo},
  publisher   = {IEEE Computer Society},
  institution = {University of Stuttgart, Faculty of Computer Science, Germany},
  pages       = {177--186},
  type        = {Conference Paper},
  month       = {July},
  year        = {2001},
  isbn        = {0-7695-1140-6},
  keywords    = {Decision Support; OLAP; Data Warehouse},
  language    = {English},
  cr-category = {H.4.2 Information Systems Applications Types of Systems},
  ee          = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2001-32/INPROC-2001-32.pdf},
  contact     = {holger.schwarz@informatik.uni-stuttgart.de},
  department  = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Many decision support applications are built upon data mining and OLAP tools
                 and allow users to answer information requests based on a data warehouse that
                 is managed by a powerful DBMS. In this paper, we focus on tools that generate
                 sequences of SQL statements in order to produce the requested information. Our
                 thorough analysis revealed that many sequences of queries that are generated by
                 commercial tools are not very efficient. An optimized system architecture is
                 suggested for these applications. The main component is a DSS optimizer that
                 accepts previously generated sequences of queries and remodels them according
                 to a set of optimization strategies, before they are executed by the underlying
                 database system. The advantages of this extended architecture are discussed and
                 a couple of appropriate optimization strategies are identified. Experimental
                 results are given, showing that these strategies are appropriate to optimize
                 typical query sequences of an OLAP application.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2001-32&engl=1}
}
@inproceedings {INPROC-2000-18,
author = {Marcello Mariucci and Christophe Caspar and Luigi Fusco and Yves Henaff and Michele Forte},
title = {{RAMSES: An Operational Thematic EO-Application on Oil Spill Monitoring. System description}},
booktitle = {Proceedings of the Conference on Earth Observation (EO) \& Geo-Spatial (GEO) Web and Internet Workshop 2000: EOGEO 2000; London, United Kingdom, April 17-19, 2000. Committee on Earth Observation Satellites (CEOS), 2000.},
publisher = {Committee on Earth Observation Satellites (CEOS)},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {1--10},
type = {Conference Paper},
month = {April},
year = {2000},
keywords = {Earth Observation Application Systems; Web Application Server; Middleware; CORBA; JAVA; Oil Pollution},
language = {English},
cr-category = {H.2 Database Management,
H.4 Information Systems Applications,
J.2 Physical Sciences and Engineering,
E.1 Data Structures},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2000-18/INPROC-2000-18.ps,
http://webtech.ceos.org/eogeo2000},
contact = {Please send an email to mariucci@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {RAMSES (Regional earth observation Application for Mediterranean Sea Emergency
Surveillance) is a web-based operational thematic EO Application System for oil
spill detection and monitoring. Involving many geographically distributed
resources and competences, the system had to meet strong requirements in terms
of platform-independency and expandability. In addition, because thematic
applications using remote sensing data share a lot of commonalties, the system
has been designed in a modular way to ease modules reusability. The fulfilment
of these objectives was largely supported by recent technologies such as CORBA
and Java. After defining the actors and services, this paper presents the
architecture of the system. It then outlines further improvements that can be
considered for the oil spill application itself as well as further steps
required towards a full multi-application support system.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2000-18&engl=1}
}
@inproceedings {INPROC-2000-17,
author = {Jochen R{\"u}tschlin},
title = {{The Requirements for a Component-based Architecture}},
booktitle = {Proceedings of the ProSTEP Science Days 2000 ``SMART Engineering'', 13./14. September 2000 at DaimlerChrysler, Stuttgart Germany.},
editor = {Reiner Anderl and Christine Frick and Alfred Katzenbach and Joachim Rix},
publisher = {ProSTEP e.V.},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {252--260},
type = {Conference Paper},
month = {September},
year = {2000},
isbn = {3-8167-5585-2},
keywords = {components, component model, framework, packaged software},
language = {English},
cr-category = {H.3.4 Information Storage and Retrieval Systems and Software,
H.3.5 Online Information Services},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2000-17/INPROC-2000-17.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Today's enterprises are going to concentrate more and more on their core
business. They do not want to implement basic functionality again and again.
That's why they are using packaged software products like SAP R/3, Dassault
Systemes' ENOVIA, and so on. But the introduction of such software suites also
brings in elements and information that are already available in the enterprise
(and should remain in the legacy systems).
In this paper, we will point out the problem on two typical architectural
scenarios and give with this the motivation for a component-based architecture.
Besides we propose an architecture and discuss the lacks of the current CORBA-,
EJB- and COM-approaches.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2000-17&engl=1}
}
@inproceedings {INPROC-2000-01,
author = {Clara Nippl and Ralf Rantzau and Bernhard Mitschang},
title = {{StreamJoin: A Generic Database Approach to Support the Class of Stream-Oriented Applications}},
booktitle = {Proceedings of the International Database Engineering \& Applications Symposium (IDEAS); Yokohama, Japan, September 2000},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {83--91},
type = {Conference Paper},
month = {September},
year = {2000},
isbn = {0-7695-0789-1},
keywords = {database applications; data mining; database extensions; database operators},
language = {English},
cr-category = {H.2.4 Database Management Systems,
H.2.8 Database Applications},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2000-01/INPROC-2000-01.ps},
contact = {rrantzau@acm.org},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Today many applications routinely generate large quantities of data. The data
often takes the form of (time) series, or more generally streams, i.e. an
ordered sequence of records. Analysis of this data requires stream processing
techniques which differ in significant ways from what current database analysis
and query techniques have been optimized for. In this paper we present a new
operator, called StreamJoin, that can efficiently be used to solve
stream-related problems of various applications, such as universal
quantification, pattern recognition and data mining. Contrary to other
approaches, StreamJoin processing provides rapid response times, a non-blocking
execution as well as economical resource utilization. Adaptability to different
application scenarios is realized by means of parameters. In addition, the
StreamJoin operator can be efficiently embedded into the database engine, thus
implicitly using the optimization and parallelization capabilities for the
benefit of the application. The paper focuses on the applicability of
StreamJoin to integrate application semantics into the DBMS.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2000-01&engl=1}
}
@inproceedings {INPROC-1999-30,
author = {Hermann Ludwig M{\"o}ller and Marcello Mariucci and Bernhard Mitschang},
title = {{Architecture Considerations for Advanced Earth Observation Application Systems}},
booktitle = {Proceedings of the Second International Conference on Interoperating Geographic Information System: Interop '99; Zurich, Switzerland, March 10-12, 1999},
editor = {V{\`e}kovski, Andrej and Brassel, Kurt E. and Schek, Hans-J{\"o}rg},
address = {Berlin, Heidelberg, New York, Barcelona, Hong Kong, London, Milan, Paris, Singapore},
publisher = {Springer-Verlag},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
series = {Lecture Notes in Computer Science},
volume = {1580},
pages = {75--90},
type = {Conference Paper},
month = {March},
year = {1999},
isbn = {3-540-65725-8},
keywords = {Distributed Information Systems; Earth Observation Systems; Applications; Interoperability; Middleware; CORBA},
language = {English},
cr-category = {H.2 Database Management,
H.4 Information Systems Applications,
J.2 Physical Sciences and Engineering,
E.1 Data Structures},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-1999-30/INPROC-1999-30.pdf,
ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-1999-30/INPROC-1999-30.ps},
contact = {Please send an email to mariucci@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Application systems in the earth observation area can be characterised as
distributed, platform-inhomogeneous, complex, and cost intensive information
systems. In order to manage the complexity and performance requirements set by
these application scenarios a number of architectural considerations have to be
applied. Among others the most important ones are modularization towards a
component architecture and interoperation within this component model. As will
be described in this paper, both are mandatory to achieving a high degree of
reusability and extensibility at the component level as well as to support the
necessary scalability properties. In our paper we refer to the state of the art
in earth observation application systems as well as to a prototype system that
reflects to a high degree the above mentioned system characteristics.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-1999-30&engl=1}
}
@inproceedings{INPROC-1999-28,
  author      = {Aiko Frank},
  title       = {{Towards an Activity Model for Design Applications}},
  booktitle   = {Proceedings of the 14th Int'l. Conference April 7-9, 1999, Cancun, Mexico},
  editor      = {R. Y. Lee},
  publisher   = {ISCA},
  institution = {University of Stuttgart, Faculty of Computer Science, Germany},
  pages       = {1--10},
  type        = {Conference Paper},
  month       = {April},
  year        = {1999},
  isbn        = {1-880843-27-7},
  language    = {English},
  cr-category = {H Information Systems},
  department  = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Towards an Activity Model for Design Applications.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-1999-28&engl=1}
}
@inproceedings{INPROC-1999-27,
  author      = {J{\"u}rgen Sellentin and Aiko Frank and Bernhard Mitschang},
  title       = {{TOGA -- A Customizable Service for Data-Centric Collaboration}},
  booktitle   = {Proceedings of the 11th Conference on Advanced Information Systems Engineering (CAiSE*99)},
  editor      = {Matthias Jarke and Andreas Oberweis},
  publisher   = {Springer},
  institution = {University of Stuttgart, Faculty of Computer Science, Germany},
  series      = {Lecture Notes in Computer Science},
  volume      = {1626},
  pages       = {301--316},
  type        = {Conference Paper},
  month       = {June},
  year        = {1999},
  isbn        = {3-540-66157-3},
  language    = {English},
  cr-category = {H.2 Database Management},
  department  = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
  abstract    = {TOGA -- A Customizable Service for Data-Centric Collaboration.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-1999-27&engl=1}
}
@inproceedings {INPROC-1999-26,
author = {J{\"u}rgen Sellentin and Bernhard Mitschang},
title = {{Design and Implementation of a CORBA Query Service Accessing EXPRESS-based Data}},
booktitle = {The 6th International Conference on Database Systems for Advanced Applications (DASFAA), Hsinchu, Taiwan, April 19-21, 1999},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {273--282},
type = {Conference Paper},
month = {April},
year = {1999},
keywords = {EXPRESS, CORBA},
language = {English},
cr-category = {H.2.4 Database Management Systems,
J.6 Computer-Aided Engineering},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-1999-26/INPROC-1999-26.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {In this paper we describe the design and implementation of a CORBA Query
Service targeted to access data that is defined by the EXPRESS data modeling
language. EXPRESS is used primarily in engineering domains (like CAD/CAM and
GIS) to describe mostly product model data (like parts explosion or product
geometry). In order to bring query facilities for EXPRESS-based data to CORBA a
number of design decisions have to be taken, although the CORBA Query Service
is standardized by the OMG. Among the most important and performance-indicating
decisions are the definition of an appropriate query language and the
description of the query result data structures. In this paper we discuss
solutions to these topics and report on the experiences gained in designing and
implementing our first CORBA Query Service for EXPRESS-based Data.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-1999-26&engl=1}
}
@inproceedings {INPROC-1999-25,
author = {Stefan Sarstedt and G{\"u}nter Sauter and J{\"u}rgen Sellentin and Bernhard Mitschang},
title = {{Integrationskonzepte f{\"u}r heterogene Anwendungssysteme bei DaimlerChrysler auf Basis internationaler Standards}},
booktitle = {Datenbanksysteme in B{\"u}ro, Technik und Wissenschaft, GI-Fachtagung BTW 99, Freiburg im Breisgau, 1.-3. M{\"a}rz 1999},
editor = {A. Buchmann},
address = {Berlin, Heidelberg, New York},
publisher = {Springer},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
series = {Informatik aktuell},
pages = {317--327},
type = {Conference Paper},
month = {March},
year = {1999},
isbn = {3-540-65606-5},
keywords = {Funktionsintegration; API-Integration; Heterogenit{\"a}t; STEP; CORBA},
language = {German},
cr-category = {H.2.5 Heterogeneous Databases},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-1999-25/INPROC-1999-25.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Aufbauend auf den Anforderungen von DaimlerChrysler und unter Verwendung der
Standards STEP und CORBA wird eine Architektur und Vorgehensweise f{\"u}r die
Integration von Daten und Funktionen heterogener Anwendungssysteme
entwickelt. Die eingebrachten Systemkonzepte sowie die dadurch zu erwartende
Optimierung des Entwicklungsprozesses werden am Beispiel des Bereichs
PKW-Entwicklung diskutiert.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-1999-25&engl=1}
}
@inproceedings {INPROC-1999-24,
author = {Michael Jaedicke and Bernhard Mitschang},
title = {{User-Defined Table Operators: Enhancing Extensibility for ORDBMS}},
booktitle = {VLDB'99, Proceedings of 25th International Conference on Very Large Data Bases, Edinburgh, Scotland, UK, September 7-10, 1999},
publisher = {Morgan Kaufmann},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {494--505},
type = {Conference Paper},
month = {September},
year = {1999},
isbn = {1-55860-615-7},
language = {English},
cr-category = {H.2.4 Database Management Systems},
ee = {http://www3.informatik.tu-muenchen.de/public/projekte/sfb342/publications/99.SFB-Bericht.Multiop.ps.gz},
contact = {Bernhard Mitschang mitsch@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Currently parallel object-relational database technology is setting the
direction for the future of data management. A central enhancement of
object-relational database technology is the possibility to execute arbitrary
user-defined functions within SQL statements. We show the limits of this
approach and propose user-defined table operators as a new concept that allows
the definition and implementation of arbitrary user-defined N-ary database
operators, which can be programmed using SQL or Embedded SQL (with some
extensions). Our approach leads to a new dimension of extensibility that allows
to push more application code into the server with full support for efficient
execution and parallel processing. Furthermore it allows performance
enhancements of orders of magnitude for the evaluation of many queries with
complex user-defined functions as we show for two concrete examples. Finally,
our implementation perception guarantees that this approach fits well into the
architectures of commercial object-relational database management systems.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-1999-24&engl=1}
}
@inproceedings {INPROC-1999-01,
author = {Ralf Rantzau and Holger Schwarz},
title = {{A Multi-Tier Architecture for High-Performance Data Mining}},
booktitle = {Proceedings of the Conference Datenbanksysteme in B{\"u}ro, Technik und Wissenschaft (BTW 1999), Freiburg, Germany, March 1999},
editor = {A. P. Buchmann},
address = {Berlin, Heidelberg, New York},
publisher = {Springer},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
series = {Informatik aktuell},
pages = {151--163},
type = {Conference Paper},
month = {March},
year = {1999},
isbn = {3-540-65606-5},
language = {English},
cr-category = {H.2.8 Database Applications},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-1999-01/INPROC-1999-01.ps,
ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-1999-01/INPROC-1999-01.pdf},
contact = {rrantzau@acm.org},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Data mining has been recognised as an essential element of decision support,
which has increasingly become a focus of the database industry. Like all
computationally expensive data analysis applications, for example Online
Analytical Processing (OLAP), performance is a key factor for usefulness and
acceptance in business. In the course of the CRITIKAL project (Client-Server
Rule Induction Technology for Industrial Knowledge Acquisition from Large
Databases), which is funded by the European Commission, several kinds of
architectures for data mining were evaluated with a strong focus on high
performance. Specifically, the data mining techniques association rule
discovery and decision tree induction were implemented into a prototype. We
present the architecture developed by the CRITIKAL consortium and compare it to
alternative architectures.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-1999-01&engl=1}
}
@inproceedings {INPROC-1998-20,
author = {J{\"u}rgen Sellentin and Bernhard Mitschang},
title = {{Data Intensive Intra- \& Internet Applications - An Example Using Java and CORBA in the World Wide Web}},
booktitle = {Proceedings of the Fourteenth International Conference on Data Engineering, February 23-27, 1998, Orlando, Florida, USA},
publisher = {IEEE},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {302--311},
type = {Conference Paper},
month = {February},
year = {1998},
isbn = {0-8186-8289-2},
language = {English},
cr-category = {H.4 Information Systems Applications,
H.2.4 Database Management Systems,
H.2.5 Heterogeneous Databases},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Intra/Internet technology has become a key issue in the development of modern
systems. Nowadays it is not sufficient anymore to present static information
sheets through the WWW, instead we need interactive applications that may even
compute complex results or process large data sets. In this paper we describe a
prototype based on Java and CORBA. Both represent modern concepts that have
been developed to fulfill these requirements. Their combination results into
the kind of data processing we want to apply to the WWW: First, portable,
powerful, structured and even reusable client programs instead of cryptic HTML
scripts, second, well defined interfaces, and third, efficient server processes
separated from the WWW server and its CGI extensions. Communication is
controlled by a fault tolerant CORBA layer, which also enables server
development using a different language than Java. Besides a discussion of CORBA
and its data shipping capabilities, we take a closer look at Java and its
runtime behavior, and we report on the experiences gathered with our prototype
system and its testbed application. This system has also been used to gather
experiences with and to influence the new language binding of the Standard Data
Access Interface (SDAI) of the Standard for the Exchange of Product Data (STEP,
ISO 10303) to Java.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-1998-20&engl=1}
}
@inproceedings {INPROC-1998-19,
author = {Clara Nippl and Bernhard Mitschang},
title = {{TOPAZ: a Cost-Based, Rule-Driven, Multi-Phase Parallelizer}},
booktitle = {VLDB'98, Proceedings of 24th International Conference on Very Large Data Bases, New York City, New York, USA, August 24-27, 1998},
publisher = {Morgan Kaufmann},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {251--262},
type = {Conference Paper},
month = {August},
year = {1998},
isbn = {1-55860-566-5},
language = {English},
cr-category = {H.2.4 Database Management Systems},
contact = {Bernhard Mitschang mitsch@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Currently the key problems of query optimization are extensibility imposed by
object-relational technology, as well as query complexity caused by forthcoming
applications, such as OLAP. We propose a generic approach to parallelization,
called TOPAZ. Different forms of parallelism are exploited to obtain maximum
speedup combined with lowest resource consumption. The necessary abstractions
w.r.t. operator characteristics and system architecture are provided by rules
that are used by a cost-based, top-down search engine. A multi-phase pruning
based on a global analysis of the plan efficiently guides the search process,
thus considerably reducing complexity and achieving optimization performance.
Since TOPAZ solely relies on the widespread concepts of iterators and
data rivers common to (parallel) execution models, it fits as an enabling
technology into most state-of-the-art (object-) relational systems.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-1998-19&engl=1}
}
@inproceedings {INPROC-1998-18,
author = {Michael Jaedicke and Bernhard Mitschang},
title = {{On Parallel Processing of Aggregate and Scalar Functions in Object-Relational DBMS}},
booktitle = {Proceedings ACM SIGMOD International Conference on Management of Data, Seattle, Washington, USA, June 2-4, 1998},
publisher = {ACM Press},
institution = {University of Stuttgart, Faculty of Computer Science, Germany},
pages = {379--389},
type = {Conference Paper},
month = {June},
year = {1998},
isbn = {0-89791-995-5},
language = {English},
cr-category = {H.2.4 Database Management Systems},
contact = {Bernhard Mitschang mitsch@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Nowadays parallel object-relational DBMS are envisioned as the next great wave,
but there is still a lack of efficient implementation concepts for some parts
of the proposed functionality. Thus one of the current goals for parallel
object-relational DBMS is to move towards higher performance. In this paper we
develop a framework that allows to process user-defined functions with data
parallelism. We will describe the class of partitionable functions that can be
processed parallelly. We will also propose an extension which allows to speed
up the processing of another large class of functions by means of parallel
sorting. Functions that can be processed by means of our techniques are often
used in decision support queries on large data volumes, for example. Hence a
parallel execution is indispensable.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-1998-18&engl=1}
}
@article {ART-2024-01,
author = {Jan Schneider and Christoph Gr{\"o}ger and Arnold Lutsch and Holger Schwarz and Bernhard Mitschang},
title = {{The Lakehouse: State of the Art on Concepts and Technologies}},
journal = {SN Computer Science},
publisher = {Springer Nature},
volume = {5},
number = {5},
pages = {1--39},
type = {Article in Journal},
month = {April},
year = {2024},
issn = {2661-8907},
doi = {10.1007/s42979-024-02737-0},
keywords = {Data Lakehouse; Data Lake; Data Platform; Data Analytics},
language = {English},
cr-category = {H.3.4 Information Storage and Retrieval Systems and Software},
ee = {https://doi.org/10.1007/s42979-024-02737-0,
https://link.springer.com/content/pdf/10.1007/s42979-024-02737-0.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In the context of data analytics, so-called lakehouses refer to novel variants
of data platforms that attempt to combine characteristics of data warehouses
and data lakes. In this way, lakehouses promise to simplify enterprise
analytics architectures, which often suffer from high operational costs, slow
analytical processes and further shortcomings resulting from data replication.
However, different views and notions on the lakehouse paradigm exist, which are
commonly driven by individual technologies and varying analytical use cases.
Therefore, it remains unclear what challenges lakehouses address, how they can
be characterized and which technologies can be leveraged to implement them.
This paper addresses these issues by providing an extensive overview of
concepts and technologies that are related to the lakehouse paradigm and by
outlining lakehouses as a distinct architectural approach for data platforms.
Concepts and technologies from literature with regard to lakehouses are
discussed, based on which a conceptual foundation for lakehouses is
established. In addition, several popular technologies are evaluated regarding
their suitability for the building of lakehouses. All findings are supported
and demonstrated with the help of a representative analytics scenario. Typical
challenges of conventional data platforms are identified, a new, sharper
definition for lakehouses is proposed and technical requirements for lakehouses
are derived. As part of an evaluation, these requirements are applied to
several popular technologies, of which frameworks for data lakes turn out to be
particularly helpful for the construction of lakehouses. Our work provides an
overview of the state of the art and a conceptual foundation for the lakehouse
paradigm, which can support future research.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2024-01&engl=1}
}
@article {ART-2023-07,
author = {Rebecca Eichler and Christoph Gr{\"o}ger and Eva Hoos and Christoph Stach and Holger Schwarz and Bernhard Mitschang},
title = {{Introducing the enterprise data marketplace: a platform for democratizing company data}},
journal = {Journal of Big Data},
publisher = {Springer Nature},
volume = {10},
pages = {1--38},
type = {Article in Journal},
month = {November},
year = {2023},
issn = {2196-1115},
doi = {10.1186/s40537-023-00843-z},
keywords = {Data Catalog; Data Democratization; Data Market; Data Sharing; Enterprise Data Marketplace; Metadata Management},
language = {English},
cr-category = {E.m Data Miscellaneous,
H.3.7 Digital Libraries,
H.4.m Information Systems Applications Miscellaneous},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In this big data era, multitudes of data are generated and collected which
contain the potential to gain new insights, e.g., for enhancing business
models. To leverage this potential through, e.g., data science and analytics
projects, the data must be made available. In this context, data marketplaces
are used as platforms to facilitate the exchange and thus, the provisioning of
data and data-related services. Data marketplaces are mainly studied for the
exchange of data between organizations, i.e., as external data marketplaces.
Yet, the data collected within a company also has the potential to provide
valuable insights for this same company, for instance to optimize business
processes. Studies indicate, however, that a significant amount of data within
companies remains unused. In this sense, it is proposed to employ an Enterprise
Data Marketplace, a platform to democratize data within a company among its
employees. Specifics of the Enterprise Data Marketplace, how it can be
implemented or how it makes data available throughout a variety of systems like
data lakes has not been investigated in literature so far. Therefore, we
present the characteristics and requirements of this kind of marketplace. We
also distinguish it from other tools like data catalogs, provide a platform
architecture and highlight how it integrates with the company's system
landscape. The presented concepts are demonstrated through an Enterprise Data
Marketplace prototype and an experiment reveals that this marketplace
significantly improves the data consumer workflows in terms of efficiency and
complexity. This paper is based on several interdisciplinary works combining
comprehensive research with practical experience from an industrial
perspective. We therefore present the Enterprise Data Marketplace as a distinct
marketplace type and provide the basis for establishing it within a company.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2023-07&engl=1}
}
@article {ART-2023-06,
author = {Christoph Stach and Cl{\'e}mentine Gritti},
title = {{Editorial to the Special Issue on Security and Privacy in Blockchains and the IoT Volume II}},
journal = {Future Internet},
address = {Basel, Schweiz},
publisher = {MDPI},
volume = {15},
number = {8},
pages = {1--7},
type = {Article in Journal},
month = {August},
year = {2023},
issn = {1999-5903},
doi = {10.3390/fi15080272},
language = {English},
cr-category = {D.4.6 Operating Systems Security and Protection,
K.4.1 Computers and Society Public Policy Issues,
K.6.5 Security and Protection},
ee = {https://www.mdpi.com/1999-5903/15/8/272/htm},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de.},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In this age of data-driven transformation, where the fusion of blockchain
technologies and the Internet of Things (IoT) is shaping the fabric of our
digital society, the need for security and privacy has never been more
important. This Special Issue delves into the intricate confluence of these two
disruptive forces and provides a comprehensive overview of security and privacy
aspects in this regard. Focusing on protection goals such as confidentiality,
integrity, availability, and privacy, this compilation encapsulates the essence
of these multi-layered challenges. Ranging from complex data-driven
applications and smart services to novel approaches that enhance security and
privacy in the context of blockchain technologies and the IoT, the research
articles and literature reviews presented here offer a sophisticated mesh of
insights. Innovative solutions are highlighted from a variety of perspectives,
and challenges such as secure data transmission, confidential communication,
and tamper-proof data storage are explored.
In this way, this Special Issue is a beacon for practitioners, researchers, and
technology enthusiasts. Developers seeking to harness the potential of
blockchain technology and IoT find rich insights while users get a
comprehensive overview of the latest research and trends. The symphony of
interdisciplinary knowledge presented here creates a harmonious blend of theory
and practice, highlighting the intricate interdependencies between
technological advances and the need for security and privacy.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2023-06&engl=1}
}
@article {ART-2023-05,
author = {Christoph Stach and Rebecca Eichler and Simone Schmidt},
title = {{A Recommender Approach to Enable Effective and Efficient Self-Service Analytics in Data Lakes}},
journal = {Datenbank-Spektrum},
publisher = {Springer Nature},
volume = {23},
number = {2},
pages = {123--132},
type = {Article in Journal},
month = {June},
year = {2023},
issn = {1618-2162},
doi = {10.1007/s13222-023-00443-4},
keywords = {Data Lake; Data Preparation; Data Pre-Processing; Data Refinement; Recommender; Self-Service Analytics},
language = {English},
cr-category = {H.2.7 Database Administration,
E.2 Data Storage Representations,
H.3.3 Information Search and Retrieval,
H.2.8 Database Applications},
contact = {Senden Sie eine E-Mail an christoph.stach@ipvs.uni-stuttgart.de.},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {As a result of the paradigm shift away from rather rigid data warehouses to
general-purpose data lakes, fully flexible self-service analytics is made
possible. However, this also increases the complexity for domain experts who
perform these analyses, since comprehensive data preparation tasks have to be
implemented for each data access. For this reason, we developed BARENTS, a
toolset that enables domain experts to specify data preparation tasks as
ontology rules, which are then applied to the data involved. Although our
evaluation of BARENTS showed that it is a valuable contribution to self-service
analytics, a major drawback is that domain experts do not receive any semantic
support when specifying the rules. In this paper, we therefore address how a
recommender approach can provide additional support to domain experts by
identifying supplementary datasets that might be relevant for their analyses or
additional data processing steps to improve data refinement. This recommender
operates on the set of data preparation rules specified in BARENTS-i.e., the
accumulated knowledge of all domain experts is factored into the data
preparation for each new analysis. Evaluation results indicate that such a
recommender approach further contributes to the practicality of BARENTS and
thus represents a step towards effective and efficient self-service analytics
in data lakes.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2023-05&engl=1}
}
@article {ART-2023-04,
author = {Alejandro Gabriel Villanueva Zacarias and Peter Reimann and Christian Weber and Bernhard Mitschang},
title = {{AssistML: An Approach to Manage, Recommend and Reuse ML Solutions}},
journal = {International Journal of Data Science and Analytics (JDSA)},
publisher = {Springer Nature},
type = {Article in Journal},
month = {July},
year = {2023},
keywords = {Meta-learning; Machine learning; AutoML; Metadata; Recommender systems},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The adoption of machine learning (ML) in organizations is characterized by the
use of multiple ML software components. When building ML systems out of these
software components, citizen data scientists face practical requirements which
go beyond the known challenges of ML, e.g., data engineering or parameter
optimization. They are expected to quickly identify ML system options that
strike a suitable trade-off across multiple performance criteria. These options
also need to be understandable for non-technical users. Addressing these
practical requirements represents a problem for citizen data scientists with
limited ML experience. This calls for a concept to help them identify suitable
ML software combinations. Related work, e.g., AutoML systems, are not
responsive enough or cannot balance different performance criteria. This paper
explains how AssistML, a novel concept to recommend ML solutions, i.e.,
software systems with ML models, can be used as an alternative for predictive
use cases. Our concept collects and preprocesses metadata of existing ML
solutions to quickly identify the ML solutions that can be reused in a new use
case. We implement AssistML and evaluate it with two exemplary use cases.
Results show that AssistML can recommend ML solutions in line with users{\^a}€™
performance preferences in seconds. Compared to AutoML, AssistML offers citizen
data scientists simpler, intuitively explained ML solutions in considerably
less time. Moreover, these solutions perform similarly or even better than
AutoML models.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2023-04&engl=1}
}
@article {ART-2023-03,
author = {Dennis Treder-Tschechlov and Manuel Fritz and Holger Schwarz and Bernhard Mitschang},
title = {{ML2DAC: Meta-Learning to Democratize AutoML for Clustering Analysis}},
journal = {Proceedings of the ACM on Management of Data (SIGMOD)},
publisher = {Association for Computing Machinery (ACM)},
volume = {1},
number = {2},
pages = {1--26},
type = {Article in Journal},
month = {June},
year = {2023},
doi = {10.1145/3589289},
language = {English},
cr-category = {I.5.3 Pattern Recognition Clustering},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Analysts often struggle with the combined algorithm selection and
hyperparameter optimization problem, a.k.a. CASH problem in literature.
Typically, they execute several algorithms with varying hyperparameter settings
to find configurations that show valuable results. Efficiently finding these
configurations is a major challenge. In clustering analyses, analysts face the
additional challenge to select a cluster validity index that allows them to
evaluate clustering results in a purely unsupervised fashion. Many different
cluster validity indices exist and each one has its benefits depending on the
dataset characteristics. While experienced analysts might address these
challenges using their domain knowledge and experience, especially novice
analysts struggle with them. In this paper, we propose a new meta-learning
approach to address these challenges. Our approach uses knowledge from past
clustering evaluations to apply strategies that experienced analysts would
exploit. In particular, we use meta-learning to (a) select a suitable
clustering validity index, (b) efficiently select well-performing clustering
algorithm and hyperparameter configurations, and (c) reduce the search space to
suitable clustering algorithms. In the evaluation, we show that our approach
significantly outperforms state-of-the-art approaches regarding accuracy and
runtime.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2023-03&engl=1}
}
@article {ART-2023-02,
author = {Vitali Hirsch and Peter Reimann and Dennis Treder-Tschechlov and Holger Schwarz and Bernhard Mitschang},
title = {{Exploiting Domain Knowledge to address Class Imbalance and a Heterogeneous Feature Space in Multi-Class Classification}},
journal = {International Journal on Very Large Data Bases (VLDB-Journal)},
publisher = {Springer},
type = {Article in Journal},
month = {February},
year = {2023},
keywords = {Classification; Domain knowledge; Multi-class Imbalance; Heterogeneous feature space},
language = {English},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Real-world data of multi-class classification tasks often show complex data
characteristics that lead to a reduced classification performance. Major
analytical challenges are a high degree of multi-class imbalance within data
and a heterogeneous feature space, which increases the number and complexity of
class patterns. Existing solutions to classification or data pre- processing
only address one of these two challenges in isolation. We propose a novel
classification approach that explicitly addresses both challenges of
multi-class imbalance and heterogeneous feature space together. As main
contribution, this approach exploits domain knowledge in terms of a taxonomy to
systematically prepare the training data. Based on an experimental evaluation
on both real-world data and several synthetically generated data sets, we show
that our approach outperforms any other classification technique in terms of
accuracy. Furthermore, it entails considerable practical benefits in real-world
use cases, e.g., it reduces rework required in the area of product quality
control.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2023-02&engl=1}
}
@article {ART-2023-01,
author = {Christoph Stach},
title = {{Data Is the New Oil--Sort of: A View on Why This Comparison Is Misleading and Its Implications for Modern Data Administration}},
journal = {Future Internet},
publisher = {MDPI},
volume = {15},
number = {2},
pages = {1--49},
type = {Article in Journal},
month = {February},
year = {2023},
issn = {1999-5903},
doi = {10.3390/fi15020071},
keywords = {data characteristics; data administration; data refinement; reliability; security; privacy},
language = {English},
cr-category = {E.0 Data General,
H.3 Information Storage and Retrieval,
K.6.5 Security and Protection,
K.4.1 Computers and Society Public Policy Issues},
ee = {https://www.mdpi.com/1999-5903/15/2/71/htm},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de.},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Currently, data are often referred to as the oil of the 21st century. This
comparison is not only used to express that the resource data are just as
important for the fourth industrial revolution as oil was for the technological
revolution in the late 19th century. There are also further similarities
between these two valuable resources in terms of their handling. Both must
first be discovered and extracted from their sources. Then, the raw materials
must be cleaned, preprocessed, and stored before they can finally be delivered
to consumers. Despite these undeniable similarities, however, there are
significant differences between oil and data in all of these processing steps,
making data a resource that is considerably more challenging to handle. For
instance, data sources, as well as the data themselves, are heterogeneous,
which means there is no one-size-fits-all data acquisition solution.
Furthermore, data can be distorted by the source or by third parties without
being noticed, which affects both quality and usability. Unlike oil, there is
also no uniform refinement process for data, as data preparation should be
tailored to the subsequent consumers and their intended use cases. With regard
to storage, it has to be taken into account that data are not consumed when
they are processed or delivered to consumers, which means that the data volume
that has to be managed is constantly growing. Finally, data may be subject to
special constraints in terms of distribution, which may entail individual
delivery plans depending on the customer and their intended purposes. Overall,
it can be concluded that innovative approaches are needed for handling the
resource data that address these inherent challenges. In this paper, we
therefore study and discuss the relevant characteristics of data making them
such a challenging resource to handle. In order to enable appropriate data
provisioning, we introduce a holistic research concept from data source to data
sink that respects the processing requirements of data producers as well as the
quality requirements of data consumers and, moreover, ensures a trustworthy
data administration.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2023-01&engl=1}
}
@article {ART-2022-09,
author = {Christoph Stach},
title = {{Editorial to the Special Issue on Security and Privacy in Blockchains and the IoT}},
journal = {Future Internet},
publisher = {MDPI},
volume = {14},
number = {11},
pages = {1--4},
type = {Article in Journal},
month = {November},
year = {2022},
issn = {1999-5903},
doi = {10.3390/fi14110317},
language = {English},
cr-category = {D.4.6 Operating Systems Security and Protection,
K.4.1 Computers and Society Public Policy Issues,
K.6.5 Security and Protection},
ee = {https://www.mdpi.com/1999-5903/14/11/317/htm},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de.},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In this day and age, data have become an immensely valuable resource. They are
the key driver that puts the smart into smart services. This is fundamentally
fueled by two technological achievements, namely the Internet of Things (IoT),
which enables continuous and comprehensive collection of all kinds of data, and
blockchain technologies, which provide secure data management and exchange. In
addition to those information security measures, however, data privacy
solutions are also required to protect the involved sensitive data. In this
Special Issue, eight research papers address security and privacy challenges
when dealing with blockchain technologies and the IoT. Concerning the IoT,
solutions are presented on how IoT group communication can be secured and how
trust within IoT applications can be increased. In the context of blockchain
technologies, approaches are introduced on how the capabilities of query
processing can be enhanced and how a proof-of-work consensus protocol can be
efficiently applied in IoT environments. Furthermore, it is discussed how
blockchain technologies can be used in IoT environments to control access to
confidential IoT data as well as to enable privacy-aware data sharing. Finally,
two reviews give an overview of the state of the art in in-app activity
recognition based on convolutional neural networks and the prospects for
blockchain technology applications in ambient assisted living.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2022-09&engl=1}
}
@article {ART-2022-08,
author = {Christoph Stach and Cl{\'e}mentine Gritti and Dennis Przytarski and Bernhard Mitschang},
title = {{Assessment and Treatment of Privacy Issues in Blockchain Systems}},
journal = {ACM SIGAPP Applied Computing Review},
publisher = {ACM},
volume = {22},
number = {3},
pages = {5--24},
type = {Article in Journal},
month = {September},
year = {2022},
issn = {1559-6915},
keywords = {blockchain; decentralized; immutable; tamper-proof; GDPR; privacy assessment; data purging; data authentication; permission control; privacy filters; privacy control environment},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
K.6.5 Security and Protection},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de.},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The ability to capture and quantify any aspect of daily life via sensors,
enabled by the Internet of Things (IoT), data have become one of the most
important resources of the 21st century. However, the high value of data also
renders data an appealing target for criminals. Two key protection goals when
dealing with data are therefore to maintain their permanent availability and to
ensure their integrity. Blockchain technology provides a means of data
protection that addresses both of these objectives. On that account,
blockchains are becoming increasingly popular for the management of critical
data. As blockchains are operated in a decentralized manner, they are not only
protected against failures, but it is also ensured that neither party has sole
control over the managed data. Furthermore, blockchains are immutable and
tamper-proof data stores, whereby data integrity is guaranteed. While these
properties are preferable from a data security perspective, they also pose a
threat to privacy and confidentiality, as data cannot be concealed, rectified,
or deleted once they are added to the blockchain.
In this paper, we therefore investigate which features of the blockchain pose
an inherent privacy threat when dealing with personal or confidential data. To
this end, we consider to what extent blockchains are in compliance with
applicable data protection laws, namely the European General Data Protection
Regulation (GDPR). Based on our identified key issues, we assess which concepts
and technical measures can be leveraged to address these issues in order to
create a privacy-by-design blockchain system.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2022-08&engl=1}
}
@article {ART-2022-07,
author = {Christoph Stach and Cl{\'e}mentine Gritti and Julia Br{\"a}cker and Michael Behringer and Bernhard Mitschang},
title = {{Protecting Sensitive Data in the Information Age: State of the Art and Future Prospects}},
journal = {Future Internet},
publisher = {MDPI},
volume = {14},
number = {11},
pages = {1--42},
type = {Article in Journal},
month = {October},
year = {2022},
issn = {1999-5903},
doi = {10.3390/fi14110302},
keywords = {smart service; privacy techniques; location-based services; health services; voice-controlled digital assistants; image analysis; food analysis; recommender systems; DNA sequence classification},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
K.6.5 Security and Protection},
ee = {https://www.mdpi.com/1999-5903/14/11/302/htm},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de.},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The present information age is characterized by an ever-increasing
digitalization. Smart devices quantify our entire lives. These collected data
provide the foundation for data-driven services called smart services. They are
able to adapt to a given context and thus tailor their functionalities to the
user's needs. It is therefore not surprising that their main resource, namely
data, is nowadays a valuable commodity that can also be traded. However, this
trend does not only have positive sides, as the gathered data reveal a lot of
information about various data subjects. To prevent uncontrolled insights into
private or confidential matters, data protection laws restrict the processing
of sensitive data. One key factor in this regard is user-friendly privacy
mechanisms. In this paper, we therefore assess current state-of-the-art privacy
mechanisms. To this end, we initially identify forms of data processing applied
by smart services. We then discuss privacy mechanisms suited for these use
cases. Our findings reveal that current state-of-the-art privacy mechanisms
provide good protection in principle, but there is no compelling
one-size-fits-all privacy approach. This leads to further questions regarding
the practicality of these mechanisms, which we present in the form of seven
thought-provoking propositions.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2022-07&engl=1}
}
@article {ART-2022-06,
author = {Christoph Stach and Michael Behringer and Julia Br{\"a}cker and Cl{\'e}mentine Gritti and Bernhard Mitschang},
title = {{SMARTEN --- A Sample-Based Approach towards Privacy-Friendly Data Refinement}},
journal = {Journal of Cybersecurity and Privacy},
publisher = {MDPI},
volume = {2},
number = {3},
pages = {606--628},
type = {Article in Journal},
month = {August},
year = {2022},
issn = {2624-800X},
doi = {10.3390/jcp2030031},
keywords = {privacy; data refinement; data cleansing; data transformation; human-in-the-loop},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
K.6.5 Security and Protection},
ee = {https://www.mdpi.com/2624-800X/2/3/31/htm},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de.},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Two factors are crucial for the effective operation of modern-day smart
services: Initially, IoT-enabled technologies have to capture and combine huge
amounts of data on data subjects. Then, all these data have to be processed
exhaustively by means of techniques from the area of big data analytics. With
regard to the latter, thorough data refinement in terms of data cleansing and
data transformation is the decisive cornerstone. Studies show that data
refinement reaches its full potential only by involving domain experts in the
process. However, this means that these experts need full insight into the data
in order to be able to identify and resolve any issues therein, e.g., by
correcting or removing inaccurate, incorrect, or irrelevant data records. In
particular for sensitive data (e.g., private data or confidential data), this
poses a problem, since these data are thereby disclosed to third parties such
as domain experts. To this end, we introduce SMARTEN, a sample-based approach
towards privacy-friendly data refinement to smarten up big data analytics and
smart services. SMARTEN applies a revised data refinement process that fully
involves domain experts in data pre-processing but does not expose any
sensitive data to them or any other third-party. To achieve this, domain
experts obtain a representative sample of the entire data set that meets all
privacy policies and confidentiality guidelines. Based on this sample, domain
experts define data cleaning and transformation steps. Subsequently, these
steps are converted into executable data refinement rules and applied to the
entire data set. Domain experts can request further samples and define further
rules until the data quality required for the intended use case is reached.
Evaluation results confirm that our approach is effective in terms of both data
quality and data privacy.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2022-06&engl=1}
}
@article {ART-2022-01,
author = {Christoph Stach and Julia Br{\"a}cker and Rebecca Eichler and Corinna Giebler and Bernhard Mitschang},
title = {{Simplified Specification of Data Requirements for Demand-Actuated Big Data Refinement}},
journal = {Journal of Data Intelligence},
publisher = {Rinton Press},
volume = {3},
number = {3},
pages = {366--400},
type = {Article in Journal},
month = {August},
year = {2022},
issn = {2577-610X},
keywords = {data pre-processing; data transformation; knowledge modeling; ontology; data management; Data Lakes; zone model; food analysis},
language = {English},
cr-category = {H.2.7 Database Administration,
E.2 Data Storage Representations,
H.3.3 Information Search and Retrieval,
H.2.8 Database Applications},
contact = {Senden Sie eine E-Mail an christoph.stach@ipvs.uni-stuttgart.de.},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Data have become one of the most valuable resources in modern society. Due to
increasing digitalization and the growing prevalence of the Internet of Things,
it is possible to capture data on any aspect of today's life. Similar to
physical resources, data have to be refined before they can become a profitable
asset. However, such data preparation entails completely novel challenges: For
instance, data are not consumed when being processed, whereby the volume of
available data that needs to be managed increases steadily. Furthermore, the
data preparation has to be tailored to the intended use case in order to
achieve an optimal outcome. This, however, requires the knowledge of domain
experts. Since such experts are typically not IT experts, they need tools that
enable them to specify the data requirements of their use cases in a
user-friendly manner. The goal of this data preparation is to provide any
emerging use case with demand-actuated data.
With this in mind, we designed a tailorable data preparation zone for Data
Lakes called BARENTS. It provides a simplified method for domain experts to
specify how data must be pre-processed for their use cases, and these data
preparation steps are then applied automatically. The data requirements are
specified by means of an ontology-based method which is comprehensible to
non-IT experts. Data preparation and provisioning are realized
resource-efficient by implementing BARENTS as a dedicated zone for Data Lakes.
This way, BARENTS is seamlessly embeddable into established Big Data
infrastructures.
This article is an extended and revised version of the conference paper
``Demand-Driven Data Provisioning in Data Lakes: BARENTS - A Tailorable Data
Preparation Zone'' by Stach et al. In comparison to our original conference
paper, we take a more detailed look at related work in the paper at hand. The
emphasis of this extended and revised version, however, is on strategies to
improve the performance of BARENTS and enhance its functionality. To this end,
we discuss in-depth implementation details of our prototype and introduce a
novel recommender system in BARENTS that assists users in specifying data
preparation steps.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2022-01&engl=1}
}
@article {ART-2021-05,
author = {Manuel Fritz and Michael Behringer and Dennis Tschechlov and Holger Schwarz},
title = {{Efficient exploratory clustering analyses in large-scale exploration processes}},
journal = {The VLDB Journal},
editor = {Georgia Koutrika and Ren{\'e}e J. Miller and Kyuseok Shim},
address = {Berlin, Heidelberg},
publisher = {Springer Berlin Heidelberg},
pages = {1--22},
type = {Article in Journal},
month = {November},
year = {2021},
doi = {10.1007/s00778-021-00716-y},
issn = {1066-8888},
keywords = {Exploratory clustering analysis; Exploration; Clustering; Centroid-based clustering},
language = {English},
cr-category = {H.3.3 Information Search and Retrieval},
contact = {Senden Sie eine E-Mail an manuel.fritz@ipvs.uni-stuttgart.de.},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Clustering is a fundamental primitive in manifold applications. In order to
achieve valuable results in exploratory clustering analyses, parameters of the
clustering algorithm have to be set appropriately, which is a tremendous
pitfall. We observe multiple challenges for large-scale exploration processes.
On the one hand, they require specific methods to efficiently explore large
parameter search spaces. On the other hand, they often exhibit large runtimes,
in particular when large datasets are analyzed using clustering algorithms with
super-polynomial runtimes, which repeatedly need to be executed within
exploratory clustering analyses. We address these challenges as follows: First,
we present LOG-Means and show that it provides estimates for the number of
clusters in sublinear time regarding the defined search space, i.e., provably
requiring less executions of a clustering algorithm than existing methods.
Second, we demonstrate how to exploit fundamental characteristics of
exploratory clustering analyses in order to significantly accelerate the
(repetitive) execution of clustering algorithms on large datasets. Third, we
show how these challenges can be tackled at the same time. To the best of our
knowledge, this is the first work which simultaneously addresses the
above-mentioned challenges. In our comprehensive evaluation, we unveil that our
proposed methods significantly outperform state-of-the-art methods, thus
especially supporting novice analysts for exploratory clustering analyses in
large-scale exploration processes.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2021-05&engl=1}
}
@article {ART-2021-04,
author = {Dennis Przytarski and Christoph Stach and Cl{\'e}mentine Gritti and Bernhard Mitschang},
title = {{Query Processing in Blockchain Systems: Current State and Future Challenges}},
journal = {Future Internet},
editor = {Dino Giuli and Andrew Hudson-Smith and Luis Javier Garcia Villalba},
publisher = {MDPI},
volume = {14},
number = {1},
pages = {1--31},
type = {Article in Journal},
month = {December},
year = {2021},
issn = {1999-5903},
doi = {10.3390/fi14010001},
keywords = {blockchain systems; query processing; data models; data structures; block structures},
language = {English},
cr-category = {H.3.0 Information Storage and Retrieval General,
H.3.3 Information Search and Retrieval},
contact = {Senden Sie eine E-Mail an Dennis.Przytarski@ipvs.uni-stuttgart.de.},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {When, in 2008, Satoshi Nakamoto envisioned the first distributed database
management system that relied on cryptographically secured chain of blocks to
store data in an immutable and tamper-resistant manner, his primary use case
was the introduction of a digital currency. Owing to this use case, the
blockchain system was geared towards efficient storage of data, whereas the
processing of complex queries, such as provenance analyses of data history, is
out of focus. The increasing use of Internet of Things technologies and the
resulting digitization in many domains, however, have led to a plethora of
novel use cases for a secure digital ledger. For instance, in the healthcare
sector, blockchain systems are used for the secure storage and sharing of
electronic health records, while the food industry applies such systems to
enable a reliable food-chain traceability, e.g., to prove compliance with cold
chains. In these application domains, however, querying the current state is
not sufficient - comprehensive history queries are required instead. Due to
these altered usage modes involving more complex query types, it is
questionable whether today's blockchain systems are prepared for this type of
usage and whether such queries can be processed efficiently by them. In our
paper, we therefore investigate novel use cases for blockchain systems and
elicit their requirements towards a data store in terms of query capabilities.
We reflect the state of the art in terms of query support in blockchain systems
and assess whether it is capable of meeting the requirements of such more
sophisticated use cases. As a result, we identify future research challenges
with regard to query processing in blockchain systems.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2021-04&engl=1}
}
@article {ART-2021-03,
author = {Rebecca Eichler and Corinna Giebler and Christoph Gr{\"o}ger and Holger Schwarz and Bernhard Mitschang},
title = {{Modeling Metadata in Data Lakes --- A Generic Model}},
journal = {Data \& Knowledge Engineering},
publisher = {Elsevier},
volume = {136},
pages = {1--17},
type = {Article in Journal},
month = {November},
year = {2021},
issn = {0169-023X},
doi = {10.1016/j.datak.2021.101931},
keywords = {Metadata management; Metadata model; Data lake; Data management; Data lake zones; Metadata classification},
language = {English},
cr-category = {H.2 Database Management},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Data contains important knowledge and has the potential to provide new
insights. Due to new technological developments such as the Internet of Things,
data is generated in increasing volumes. In order to deal with these data
volumes and extract the data's value, new concepts such as the data lake
were created. The data lake is a data management platform designed to handle
data at scale for analytical purposes. To prevent a data lake from becoming
inoperable and turning into a data swamp, metadata management is needed. To
store and handle metadata, a generic metadata model is required that can
reflect metadata of any potential metadata management use case, e.g., data
versioning or data lineage. However, an evaluation of existent metadata models
yields that none so far are sufficiently generic as their design basis is not
suited. In this work, we use a different design approach to build HANDLE, a
generic metadata model for data lakes. The new metadata model supports the
acquisition of metadata on varying granular levels, any metadata
categorization, including the acquisition of both metadata that belongs to a
specific data element as well as metadata that applies to a broader range of
data. HANDLE supports the flexible integration of metadata and can reflect the
same metadata in various ways according to the intended utilization.
Furthermore, it is created for data lakes and therefore also supports data lake
characteristics like data lake zones. With these capabilities HANDLE enables
comprehensive metadata management in data lakes. HANDLE's feasibility is
shown through the application to an exemplary access-use-case and a
prototypical implementation. By comparing HANDLE with existing models we
demonstrate that it can provide the same information as the other models as
well as adding further capabilities needed for metadata management in data
lakes.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2021-03&engl=1}
}
@article {ART-2020-20,
author = {Corinna Giebler and Christoph Gr{\"o}ger and Eva Hoos and Rebecca Eichler and Holger Schwarz and Bernhard Mitschang},
title = {{Data Lakes auf den Grund gegangen - Herausforderungen und Forschungsl{\"u}cken in der Industriepraxis}},
journal = {Datenbank Spektrum},
publisher = {Springer},
volume = {20},
pages = {57--69},
type = {Article in Journal},
month = {January},
year = {2020},
keywords = {Data Lakes; Analytics; Stand der Technik; Herausforderungen; Praxisbeispiel},
language = {German},
cr-category = {H.4 Information Systems Applications},
contact = {Senden Sie eine E-Mail an Corinna.Giebler@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Unternehmen stehen zunehmend vor der Herausforderung, gro{\ss}e, heterogene Daten
zu verwalten und den darin enthaltenen Wert zu extrahieren. In den letzten
Jahren kam darum der Data Lake als neuartiges Konzept auf, um diese komplexen
Daten zu verwalten und zu nutzen. Wollen Unternehmen allerdings einen solchen
Data Lake praktisch umsetzen, so sto{\ss}en sie auf vielf{\"a}ltige
Herausforderungen, wie beispielsweise Widerspr{\"u}che in der Definition oder
unscharfe und fehlende Konzepte. In diesem Beitrag werden konkrete Projekte
eines global agierenden Industrieunternehmens genutzt, um bestehende
Herausforderungen zu identifizieren und Anforderungen an Data Lakes
herzuleiten. Diese Anforderungen werden mit der verf{\"u}gbaren Literatur zum
Thema Data Lake sowie mit existierenden Ans{\"a}tzen aus der Forschung
abgeglichen. Die Gegen{\"u}berstellung zeigt, dass f{\"u}nf gro{\ss}e Forschungsl{\"u}cken
bestehen: 1. Unklare Datenmodellierungsmethoden, 2. Fehlende
Data-Lake-Referenzarchitektur, 3. Unvollst{\"a}ndiges Metadatenmanagementkonzept,
4. Unvollst{\"a}ndiges Data-Lake-Governance-Konzept, 5. Fehlende ganzheitliche
Realisierungsstrategie.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2020-20&engl=1}
}
@article {ART-2020-19,
author = {Christoph Stach and Julia Br{\"a}cker and Rebecca Eichler and Corinna Giebler and Cl{\'e}mentine Gritti},
title = {{How to Provide High-Utility Time Series Data in a Privacy-Aware Manner: A VAULT to Manage Time Series Data}},
journal = {International Journal On Advances in Security},
editor = {Hans-Joachim Hof Hof and Birgit Gersbeck-Schierholz},
publisher = {IARIA},
volume = {13},
number = {3\&4},
pages = {1--21},
type = {Article in Journal},
month = {December},
year = {2020},
issn = {1942-2636},
keywords = {Time Series Data; Privacy Filters; Aggregation; Interpolation; Smoothing; Information Emphasis; Noise; Data Quality; Authentication; Permission Model; Data Management},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
D.4.6 Operating Systems Security and Protection},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Smart Services enrich many aspects of our daily lives, such as in the Ambient
Assisted Living (AAL) domain, where the well-being of patients is automatically
monitored, and patients have more autonomy as a result. A key enabler for such
services is the Internet of Things (IoT). Using IoT-enabled devices, large
amounts of (partly private) data are continuously captured, which can be then
gathered and analyzed by Smart Services. Although these services bring many
conveniences, they therefore also pose a serious threat to privacy. In order to
provide the highest quality of service, they need access to as many data as
possible and even reveal more private information due to in-depth data
analyses. To ensure privacy, however, data minimization is required. Users are
thus forced to balance between service quality and privacy. Current IoT privacy
approaches do not reflect this discrepancy properly. Furthermore, as users are
often not experienced in the proper handling of privacy mechanisms, this leads
to an overly restrictive behavior. Instead of charging users with privacy
control, we introduce VAULT, a novel approach towards a privacy-aware
management of sensitive data. Since in the IoT time series data have a special
position, VAULT is particularly tailored to this kind of data. It attempts to
achieve the best possible tradeoff between service quality and privacy for each
user. To this end, VAULT manages the data and enables a demand-based and
privacy-aware provision of the data, by applying appropriate privacy filters
which fulfill not only the quality requirements of the Smart Services but also
the privacy requirements of users. In doing so, VAULT pursues a Privacy by
Design approach.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2020-19&engl=1}
}
@article {ART-2020-11,
author = {Corinna Giebler and Christoph Gr{\"o}ger and Eva Hoos and Rebecca Eichler and Holger Schwarz and Bernhard Mitschang},
title = {{Data Lakes auf den Grund gegangen - Herausforderungen und Forschungsl{\"u}cken in der Industriepraxis}},
journal = {Datenbank Spektrum},
publisher = {Springer-Verlag},
volume = {20},
pages = {57--69},
type = {Article in Journal},
month = {January},
year = {2020},
keywords = {Data Lakes; Industrieerfahrung},
language = {German},
cr-category = {H.2.1 Database Management Logical Design},
contact = {Senden Sie eine E-Mail an Corinna.Giebler@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Unternehmen stehen zunehmend vor der Herausforderung, gro{\ss}e, heterogene Daten
zu verwalten und den darin enthaltenen Wert zu extrahieren. In den letzten
Jahren kam darum der Data Lake als neuartiges Konzept auf, um diese komplexen
Daten zu verwalten und zu nutzen. Wollen Unternehmen allerdings einen solchen
Data Lake praktisch umsetzen, so sto{\ss}en sie auf vielf{\"a}ltige
Herausforderungen, wie beispielsweise Widerspr{\"u}che in der Definition oder
unscharfe und fehlende Konzepte. In diesem Beitrag werden konkrete Projekte
eines global agierenden Industrieunternehmens genutzt, um bestehende
Herausforderungen zu identifizieren und Anforderungen an Data Lakes
herzuleiten. Diese Anforderungen werden mit der verf{\"u}gbaren Literatur zum
Thema Data Lake sowie mit existierenden Ans{\"a}tzen aus der Forschung
abgeglichen. Die Gegen{\"u}berstellung zeigt, dass f{\"u}nf gro{\ss}e Forschungsl{\"u}cken
bestehen: 1. Unklare Datenmodellierungsmethoden, 2. Fehlende
Data-Lake-Referenzarchitektur, 3. Unvollst{\"a}ndiges Metadatenmanagementkonzept,
4. Unvollst{\"a}ndiges Data-Lake-Governance-Konzept, 5. Fehlende ganzheitliche
Realisierungsstrategie.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2020-11&engl=1}
}
@article {ART-2020-10,
author = {Corinna Giebler and Christoph Gr{\"o}ger and Eva Hoos and Rebecca Eichler and Holger Schwarz and Bernhard Mitschang},
title = {{Data Lakes auf den Grund gegangen - Herausforderungen und Forschungsl{\"u}cken in der Industriepraxis}},
journal = {Datenbank Spektrum},
publisher = {Springer-Verlag},
volume = {20},
pages = {57--69},
type = {Article in Journal},
month = {January},
year = {2020},
keywords = {Data Lakes; Industrieerfahrung},
language = {German},
cr-category = {H.2.1 Database Management Logical Design},
contact = {Senden Sie eine E-Mail an Corinna.Giebler@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Unternehmen stehen zunehmend vor der Herausforderung, gro{\ss}e, heterogene Daten
zu verwalten und den darin enthaltenen Wert zu extrahieren. In den letzten
Jahren kam darum der Data Lake als neuartiges Konzept auf, um diese komplexen
Daten zu verwalten und zu nutzen. Wollen Unternehmen allerdings einen solchen
Data Lake praktisch umsetzen, so sto{\ss}en sie auf vielf{\"a}ltige
Herausforderungen, wie beispielsweise Widerspr{\"u}che in der Definition oder
unscharfe und fehlende Konzepte. In diesem Beitrag werden konkrete Projekte
eines global agierenden Industrieunternehmens genutzt, um bestehende
Herausforderungen zu identifizieren und Anforderungen an Data Lakes
herzuleiten. Diese Anforderungen werden mit der verf{\"u}gbaren Literatur zum
Thema Data Lake sowie mit existierenden Ans{\"a}tzen aus der Forschung
abgeglichen. Die Gegen{\"u}berstellung zeigt, dass f{\"u}nf gro{\ss}e Forschungsl{\"u}cken
bestehen: 1. Unklare Datenmodellierungsmethoden, 2. Fehlende
Data-Lake-Referenzarchitektur, 3. Unvollst{\"a}ndiges Metadatenmanagementkonzept,
4. Unvollst{\"a}ndiges Data-Lake-Governance-Konzept, 5. Fehlende ganzheitliche
Realisierungsstrategie.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2020-10&engl=1}
}
@article {ART-2020-04,
author = {Corinna Giebler and Christoph Gr{\"o}ger and Eva Hoos and Rebecca Eichler and Holger Schwarz and Bernhard Mitschang},
title = {{Data Lakes auf den Grund gegangen: Herausforderungen und Forschungsl{\"u}cken in der Industriepraxis}},
journal = {Datenbank-Spektrum},
publisher = {Springer},
volume = {20},
number = {1},
pages = {57--69},
type = {Article in Journal},
month = {January},
year = {2020},
doi = {10.1007/s13222-020-00332-0},
keywords = {Data Lake; Analytics; Stand der Technik; Herausforderungen; Praxisbeispiel},
language = {German},
cr-category = {A.1 General Literature, Introductory and Survey,
E.0 Data General},
ee = {https://rdcu.be/b0WM8},
contact = {Senden Sie eine E-Mail an Corinna.Giebler@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Unternehmen stehen zunehmend vor der Herausforderung, gro{\ss}e, heterogene Daten
zu verwalten und den darin enthaltenen Wert zu extrahieren. In den letzten
Jahren kam darum der Data Lake als neuartiges Konzept auf, um diese komplexen
Daten zu verwalten und zu nutzen. Wollen Unternehmen allerdings einen solchen
Data Lake praktisch umsetzen, so sto{\ss}en sie auf vielf{\"a}ltige
Herausforderungen, wie beispielsweise Widerspr{\"u}che in der Definition oder
unscharfe und fehlende Konzepte. In diesem Beitrag werden konkrete Projekte
eines global agierenden Industrieunternehmens genutzt, um bestehende
Herausforderungen zu identifizieren und Anforderungen an Data Lakes
herzuleiten. Diese Anforderungen werden mit der verf{\"u}gbaren Literatur zum
Thema Data Lake sowie mit existierenden Ans{\"a}tzen aus der Forschung
abgeglichen. Die Gegen{\"u}berstellung zeigt, dass f{\"u}nf gro{\ss}e Forschungsl{\"u}cken
bestehen: 1. Unklare Datenmodellierungsmethoden, 2. Fehlende
Data-Lake-Referenzarchitektur, 3. Unvollst{\"a}ndiges Metadatenmanagementkonzept,
4. Unvollst{\"a}ndiges Data-Lake-Governance-Konzept, 5. Fehlende ganzheitliche
Realisierungsstrategie.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2020-04&engl=1}
}
@article {ART-2019-24,
author = {Daniel Del Gaudio and Pascal Hirmer},
title = {{A lightweight messaging engine for decentralized data processing in the Internet of Things}},
journal = {SICS Software-Intensive Cyber-Physical Systems},
publisher = {Springer Berlin Heidelberg},
pages = {39--48},
type = {Article in Journal},
month = {August},
year = {2019},
doi = {10.1007/s00450-019-00410-z},
language = {English},
cr-category = {C.2.4 Distributed Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Today, Internet of Things applications are available throughout many different
domains (manufacturing, health, cities, homes), enabling a high degree of
automation to ease people's lives. For example, automated heating systems in
a smart home can lead to reduced costs and an increased comfort for the
residents. In the IoT, situations can be detected through interpretation of
data produced by heterogeneous sensors, which typically lead to an invocation
of actuators. In such applications, sensor data is usually streamed to a
central instance for processing. However, especially in time-critical
applications, this is not feasible, since high latency is an issue. To cope
with this problem, in this paper, we introduce an approach for decentralized
data processing in the IoT. This leads to decreased latency as well as a
reduction of costs.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2019-24&engl=1}
}
@article {ART-2019-22,
author = {Dimitri Petrik and Mathias Mormul and Peter Reimann},
title = {{Anforderungen f{\"u}r Zeitreihendatenbanken in der industriellen Edge}},
journal = {HMD Praxis der Wirtschaftsinformatik},
publisher = {Springer-Verlag},
volume = {56},
pages = {1282--1308},
type = {Article in Journal},
month = {October},
year = {2019},
doi = {10.1365/s40702-019-00568-9},
keywords = {Time Series Data; Time Series Database; Industrial IoT; Edge Computing; Defining Requirements; InfluxDB},
language = {German},
cr-category = {E.0 Data General},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Das industrielle Internet der Dinge (iIoT) integriert Informations- und
Kommunikationstechnologien in die industriellen Prozesse und erweitert sie
durch Echtzeit-Datenanalyse. Eine bedeutende Menge an Daten, die in der
industriellen Fertigung generiert werden, sind sensorbasierte Zeitreihendaten,
die in regelm{\"a}{\ss}igen Abst{\"a}nden generiert werden und zus{\"a}tzlich zum
Sensorwert einen Zeitstempel enthalten. Spezielle Zeitreihen-Datenbanken (TSDB)
sind daf{\"u}r ausgelegt, die Zeitreihendaten effizienter zu speichern. Wenn TSDBs
in der N{\"a}he der Maschine (in der industriellen Edge) eingesetzt werden, sind
Maschinendaten zur {\"U}berwachung zeitkritischer Prozesse aufgrund der niedrigen
Latenz schnell verf{\"u}gbar, was die erforderliche Zeit f{\"u}r die
Datenverarbeitung reduziert. Bisherige Untersuchungen zu TSDBs sind bei der
Auswahl f{\"u}r den Einsatz in der industriellen Edge nur begrenzt hilfreich. Die
meisten verf{\"u}gbaren Benchmarks von TSDBs sind performanceorientiert und
ber{\"u}cksichtigen nicht die Einschr{\"a}nkungen der industriellen Edge. Wir
adressieren diese L{\"u}cke und identifizieren die funktionalen Kriterien f{\"u}r den
Einsatz von TSDBs im maschinennahen Umfeld und bilden somit einen qualitativen
Anforderungskatalog. Des Weiteren zeigen wir am Beispiel von InfluxDB, wie
dieser Katalog verwendet werden kann, mit dem Ziel die Auswahl einer geeigneten
TSDB f{\"u}r Sensordaten in der Edge zu unterst{\"u}tzen.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2019-22&engl=1}
}
@article {ART-2019-21,
author = {Christoph Stach},
title = {{Datenschutzkonzepte f{\"u}r Zeitreihendaten: Bewertung von qualit{\"a}tsbewahrenden Transformationsoperatoren zum Schutz privater Datenmuster in IoT-Anwendungen}},
journal = {Datenschutz und Datensicherheit - DuD},
address = {Wiesbaden},
publisher = {Springer Fachmedien},
volume = {43},
number = {12},
pages = {753--759},
type = {Article in Journal},
month = {December},
year = {2019},
issn = {1614-0702},
doi = {10.1007/s11623-019-1201-8},
keywords = {Datenschutz; Zeitreihendaten; IoT; DSGVO},
language = {German},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
G.1.10 Numerical Analysis Applications},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Obwohl das Internet der Dinge (IoT) die Voraussetzung f{\"u}r smarte Anwendungen
schafft, die signifikante Vorteile gegen{\"u}ber traditionellen Anwendungen bieten,
stellt die zunehmende Verbreitung von IoT-f{\"a}higen Ger{\"a}ten auch eine immense
Gef{\"a}hrdung der Privatheit dar. IoT-Anwendungen sammeln eine Vielzahl an Daten
und senden diese zur Verarbeitung an ein Back-End. Hierbei werden umfangreiche
Erkenntnisse {\"u}ber den Nutzer gewonnen. Erst dieses Wissen erm{\"o}glicht die
Servicevielfalt, die IoT-Anwendungen bieten. Der Nutzer muss daher einen
Kompromiss aus Servicequalit{\"a}t und Datenschutz treffen. Heutige
Datenschutzans{\"a}tze ber{\"u}cksichtigen dies unzureichend und sind dadurch h{\"a}ufig
zu restriktiv. Daher stellen wir neue Konzepte zum Schutz privater Daten f{\"u}r
das IoT vor. Diese ber{\"u}cksichtigen die speziellen Eigenschaften von
IoT-Zeitreihendaten. So kann die Privatheit des Nutzers gew{\"a}hrleistet werden,
ohne die Servicequalit{\"a}t unn{\"o}tig einzuschr{\"a}nken.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2019-21&engl=1}
}
@article {ART-2019-12,
author = {Mathias Mormul and Pascal Hirmer and Matthias Wieland and Bernhard Mitschang},
title = {{Distributed Situation Recognition in Industry 4.0}},
journal = {International Journal On Advances in Intelligent Systems},
publisher = {IARIA},
volume = {12},
number = {1},
pages = {39--49},
type = {Article in Journal},
month = {August},
year = {2019},
issn = {1942-2679},
keywords = {Industry 4.0; Edge Computing; Situation Recognition; Distribution Pattern},
language = {English},
cr-category = {E.0 Data General},
ee = {https://www.iariajournals.org/intelligent_systems/intsys_v12_n12_2019_paged.pdf},
contact = {mathias.mormul@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In recent years, advances in the Internet of Things led to new approaches and
applications, for example, in the domains Smart Factories or Smart Cities.
However, with the advantages such applications bring, also new challenges
arise. One of these challenges is the recognition of situations, e.g., machine
failures in Smart Factories. Especially in the domain of industrial
manufacturing, several requirements have to be met in order to deliver a
reliable and efficient situation recognition. One of these requirements is
distribution in order to achieve high efficiency. In this article, we present a
layered modeling approach to enable distributed situation recognition. These
layers include the modeling, the deployment, and the execution of the situation
recognition. Furthermore, we enable tool support to decrease the complexity for
domain users.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2019-12&engl=1}
}
@article {ART-2019-11,
author = {Manuel Fritz and Osama Muazzen and Michael Behringer and Holger Schwarz},
title = {{ASAP-DM: a framework for automatic selection of analytic platforms for data mining}},
journal = {Software-Intensive Cyber-Physical Systems},
publisher = {Springer Berlin Heidelberg},
pages = {1--13},
type = {Article in Journal},
month = {August},
year = {2019},
issn = {2524-8510},
isbn = {2524-8529},
doi = {10.1007/s00450-019-00408-7},
keywords = {Data mining; Analytic platform; Platform selection},
language = {English},
cr-category = {E.0 Data General,
H.2.8 Database Applications,
H.3.3 Information Search and Retrieval},
contact = {manuel.fritz@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The plethora of analytic platforms escalates the difficulty of selecting the
most appropriate analytic platform that fits the needed data mining task, the
dataset as well as additional user-defined criteria. Especially analysts, who
are rather focused on the analytics domain, experience difficulties to keep up
with the latest developments. In this work, we introduce the ASAP-DM framework,
which enables analysts to seamlessly use several platforms, whereas programmers
can easily add several platforms to the framework. Furthermore, we investigate
how to predict a platform based on specific criteria, such as lowest runtime or
resource consumption during the execution of a data mining task. We formulate
this task as an optimization problem, which can be solved by today's
classification algorithms. We evaluate the proposed framework on several
analytic platforms such as Spark, Mahout, and WEKA along with several data
mining algorithms for classification, clustering, and association rule
discovery. Our experiments unveil that the automatic selection process can save
up to 99.71\% of the execution time due to automatically choosing a faster
platform.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2019-11&engl=1}
}
@article {ART-2019-10,
author = {Cornelia Kiefer and Peter Reimann and Bernhard Mitschang},
title = {{QUALM: Ganzheitliche Messung und Verbesserung der Datenqualit{\"a}t in der Textanalyse}},
journal = {Datenbank-Spektrum},
publisher = {Springer Verlag},
pages = {1--12},
type = {Article in Journal},
month = {June},
year = {2019},
doi = {10.1007/s13222-019-00318-7},
keywords = {Datenqualit{\"a}t; Textanalyse; Text Mining; Trainingsdaten; Semantische Ressourcen},
language = {German},
cr-category = {H.3 Information Storage and Retrieval},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Bestehende Ans{\"a}tze zur Messung und Verbesserung der Qualit{\"a}t von Textdaten in
der Textanalyse bringen drei gro{\ss}e Nachteile mit sich. Evaluationsmetriken wie
zum Beispiel Accuracy messen die Qualit{\"a}t zwar verl{\"a}sslich, sie (1) sind jedoch
auf aufw{\"a}ndig h{\"a}ndisch zu erstellende Goldannotationen angewiesen und (2) geben
keine Ansatzpunkte f{\"u}r die Verbesserung der Qualit{\"a}t. Erste dom{\"a}nenspezifische
Datenqualit{\"a}tsmethoden f{\"u}r unstrukturierte Textdaten kommen zwar ohne
Goldannotationen aus und geben Ansatzpunkte zur Verbesserung der Datenqualit{\"a}t.
Diese Methoden wurden jedoch nur f{\"u}r begrenzte Anwendungsgebiete entwickelt und
(3) ber{\"u}cksichtigen deshalb nicht die Spezifika vieler Analysetools in
Textanalyseprozessen. In dieser Arbeit pr{\"a}sentieren wir hierzu das
QUALM-Konzept zum qualitativ hochwertigen Mining von Textdaten (QUALity
Mining), das die drei o.g. Nachteile adressiert. Das Ziel von QUALM ist es, die
Qualit{\"a}t der Analyseergebnisse, z. B. bzgl. der Accuracy einer
Textklassifikation, auf Basis einer Messung und Verbesserung der Datenqualit{\"a}t
zu erh{\"o}hen. QUALM bietet hierzu eine Menge an QUALM-Datenqualit{\"a}tsmethoden.
QUALM-Indikatoren erfassen die Datenqualit{\"a}t ganzheitlich auf Basis der Passung
zwischen den Eingabedaten und den Spezifika der Analysetools, wie den
verwendeten Features, Trainingsdaten und semantischen Ressourcen (wie zum
Beispiel W{\"o}rterb{\"u}chern oder Taxonomien). Zu jedem Indikator geh{\"o}rt ein
passender Modifikator, mit dem sowohl die Daten als auch die Spezifika der
Analysetools ver{\"a}ndert werden k{\"o}nnen, um die Datenqualit{\"a}t zu erh{\"o}hen. In einer
ersten Evaluation von QUALM zeigen wir f{\"u}r konkrete Analysetools und
Datens{\"a}tze, dass die Anwendung der QUALM-Datenqualit{\"a}tsmethoden auch mit einer
Erh{\"o}hung der Qualit{\"a}t der Analyseergebnisse im Sinne der Evaluationsmetrik
Accuracy einhergeht. Die Passung zwischen Eingabedaten und Spezifika der
Analysetools wird hierzu mit konkreten QUALM-Modifikatoren erh{\"o}ht, die zum
Beispiel Abk{\"u}rzungen aufl{\"o}sen oder automatisch auf Basis von
Text{\"a}hnlichkeitsmetriken passende Trainingsdaten vorschlagen.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2019-10&engl=1}
}
@article {ART-2019-09,
author = {Karoline Saatkamp and Uwe Breitenb{\"u}cher and Oliver Kopp and Frank Leymann},
title = {{Method, formalization, and algorithms to split topology models for distributed cloud application deployments}},
journal = {Computing},
publisher = {Springer Wien},
pages = {1--21},
type = {Article in Journal},
month = {April},
year = {2019},
doi = {10.1007/s00607-019-00721-8},
keywords = {Application deployment; Distribution; Splitting; Multi-cloud; TOSCA},
language = {English},
cr-category = {D.2.2 Software Engineering Design Tools and Techniques},
ee = {https://link.springer.com/article/10.1007/s00607-019-00721-8},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {For automating the deployment of applications in cloud environments, a variety
of technologies have been developed in recent years. These technologies enable
to specify the desired deployment in the form of deployment models that can be
automatically processed by a provisioning engine. However, the deployment
across several clouds increases the complexity of the provisioning. Using one
deployment model with a single provisioning engine, which orchestrates the
deployment across the clouds, forces the providers to expose low-level APIs to
ensure the accessibility from outside. In this paper, we present an extended
version of the split and match method to facilitate the division of deployment
models to multiple models which can be deployed by each provider separately.
The goal of this approach is to reduce the information and APIs which have to
be exposed to the outside. We present a formalization and algorithms to
automate the method. Moreover, we validate the practical feasibility by a
prototype based on the TOSCA standard and the OpenTOSCA ecosystem.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2019-09&engl=1}
}
@article {ART-2019-07,
author = {Manuel Fritz and Michael Behringer and Holger Schwarz},
title = {{Quality-driven early stopping for explorative cluster analysis for big data}},
journal = {Software-Intensive Cyber-Physical Systems},
publisher = {Springer Berlin Heidelberg},
pages = {1--12},
type = {Article in Journal},
month = {February},
year = {2019},
issn = {2524-8510},
isbn = {2524-8529},
doi = {10.1007/s00450-019-00401-0},
keywords = {Clustering; Big Data; Early Stop; Convergence; Regression},
language = {English},
cr-category = {E.0 Data General,
H.2.8 Database Applications,
H.3.3 Information Search and Retrieval},
ee = {https://link.springer.com/article/10.1007/s00450-019-00401-0},
contact = {manuel.fritz@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Data analysis has become a critical success factor for companies in all areas.
Hence, it is necessary to quickly gain knowledge from available datasets, which
is becoming especially challenging in times of big data. Typical data mining
tasks like cluster analysis are very time consuming even if they run in highly
parallel environments like Spark clusters. To support data scientists in
explorative data analysis processes, we need techniques to make data mining
tasks even more efficient. To this end, we introduce a novel approach to stop
clustering algorithms as early as possible while still achieving an adequate
quality of the detected clusters. Our approach exploits the iterative nature of
many cluster algorithms and uses a metric to decide after which iteration the
mining task should stop. We present experimental results based on a Spark
cluster using multiple huge datasets. The experiments unveil that our approach
is able to accelerate the clustering up to a factor of more than 800 by
obliterating many iterations which provide only little gain in quality. This
way, we are able to find a good balance between the time required for data
analysis and quality of the analysis results.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2019-07&engl=1}
}
@article {ART-2019-03,
author = {Karoline Saatkamp and Uwe Breitenb{\"u}cher and Oliver Kopp and Frank Leymann},
title = {{An approach to automatically detect problems in restructured deployment models based on formalizing architecture and design patterns}},
journal = {SICS Software-Intensive Cyber-Physical Systems},
publisher = {Springer Berlin Heidelberg},
pages = {1--13},
type = {Article in Journal},
month = {February},
year = {2019},
doi = {10.1007/s00450-019-00397-7},
keywords = {Topology-based deployment model; Patterns; Problem detection; TOSCA; Logic programming; Prolog},
language = {English},
cr-category = {C.2.4 Distributed Systems,
D.2.2 Software Engineering Design Tools and Techniques,
D.2.12 Software Engineering Interoperability,
K.6 Management of Computing and Information Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {For the automated deployment of applications, technologies exist which can
process topology-based deployment models that describe the application's
structure with its components and their relations. The topology-based
deployment model of an application can be adapted for the deployment in
different environments. However, the structural changes can lead to problems,
which had not existed before and prevent a functional deployment. This includes
security issues, communication restrictions, or incompatibilities. For example,
a formerly over the internal network established insecure connection leads to
security problems when using the public network after the adaptation. In order
to solve problems in adapted deployment models, first the problems have to be
detected. Unfortunately, detecting such problems is a highly non-trivial
challenge that requires deep expertise about the involved technologies and the
environment. In this paper, we present (i) an approach for detecting problems
in deployment models using architecture and design patterns and (ii) the
automation of the detection process by formalizing the problem a pattern solves
in a certain context. We validate the practical feasibility of our approach by
a prototypical implementation for the automated problem detection in TOSCA
topologies.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2019-03&engl=1}
}
@article {ART-2018-07,
author = {Eva Hoos and Pascal Hirmer and Bernhard Mitschang},
title = {{Automated Creation and Provisioning of Decision Information Packages for the Smart Factory}},
journal = {Complex Systems Informatics and Modeling Quarterly},
publisher = {Online},
volume = {15},
pages = {72--89},
type = {Article in Journal},
month = {August},
year = {2018},
issn = {2255-9922},
doi = {10.7250/csimq.2018-15.04},
keywords = {Industry 4.0; Context-awareness; Data Provisioning},
language = {English},
cr-category = {H.0 Information Systems General},
ee = {https://csimq-journals.rtu.lv/article/view/csimq.2018-15.04},
contact = {Pascal.Hirmer@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2018-07&engl=1}
}
@article {ART-2018-06,
author = {Christian Weber and Matthias Wieland and Peter Reimann},
title = {{Konzepte zur Datenverarbeitung in Referenzarchitekturen f{\"u}r Industrie 4.0: Konsequenzen bei der Umsetzung einer IT-Architektur}},
journal = {Datenbank-Spektrum},
publisher = {Springer Berlin Heidelberg},
volume = {18},
number = {1},
pages = {39--50},
type = {Article in Journal},
month = {March},
year = {2018},
issn = {1610-1995},
doi = {10.1007/s13222-018-0275-z},
keywords = {Industrie 4.0; Referenzarchitektur; Datenverarbeitung; RAMI4.0; IIRA},
language = {German},
cr-category = {H.4.0 Information Systems Applications General,
J.2 Physical Sciences and Engineering},
ee = {https://link.springer.com/article/10.1007/s13222-018-0275-z},
contact = {Senden Sie eine E-Mail an christian.weber@gsame.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {F{\"u}r produzierende Unternehmen stellt die effiziente Verarbeitung gro{\ss}er
Datenmengen eine Herausforderung dar. Die Auswahl der richtigen
Architekturkonzepte f{\"u}r IT-L{\"o}sungen zur Datenverarbeitung spielt dabei eine
wichtige Rolle. Um die IT an den Herausforderungen von Industrie 4.0
auszurichten, stehen Unternehmen verschiedene Referenzarchitekturen
internationaler Gremien zur Verf{\"u}gung. Die Hauptbeitr{\"a}ge dieses Artikels haben
das Ziel, (i) einen {\"U}berblick {\"u}ber die wichtigsten Referenzarchitekturen f{\"u}r
Industrie 4.0 (I4.0) zu geben und (ii) diese unter dem Aspekt der
Datenverarbeitung zu untersuchen. Dazu werden die Referenzarchitekturen anhand
von Datenverarbeitungsanforderungen f{\"u}r I4.0 betrachtet. Die Untersuchung
zeigt, dass die I4.0-Referenzarchitekturen jeweils einen Teilbereich der
Anforderungen abdecken und sich die Konzepte gegenseitig erg{\"a}nzen. (iii) Darauf
aufbauend werden aus den Datenverarbeitungsanforderungen technische
Konsequenzen abgeleitet und Architekturkonzepte f{\"u}r die Realisierung einer
IT-Architektur f{\"u}r die Datenverarbeitung vorgestellt. Dadurch wird es
IT-Architekten erm{\"o}glicht, einen Vergleich der Referenzarchitekturen
hinsichtlich projektbezogener Anforderungen an die Datenverarbeitung
vorzunehmen sowie geeignete Architekturentscheidungen zu treffen.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2018-06&engl=1}
}
@article {ART-2017-10,
author = {Ana Cristina Franco da Silva and Pascal Hirmer and Uwe Breitenb{\"u}cher and Oliver Kopp and Bernhard Mitschang},
title = {{Customization and provisioning of complex event processing using TOSCA}},
journal = {Computer Science - Research and Development},
publisher = {Springer Berlin Heidelberg},
pages = {1--11},
type = {Article in Journal},
month = {September},
year = {2017},
issn = {1865-2034},
doi = {10.1007/s00450-017-0386-z},
keywords = {Internet of Things; Complex event processing; Customization; TOSCA},
language = {English},
cr-category = {K.6 Management of Computing and Information Systems,
D.2.12 Software Engineering Interoperability},
ee = {https://link.springer.com/article/10.1007/s00450-017-0386-z},
contact = {Ana-Cristina.Franco-da-Silva@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2017-10&engl=1}
}
@article {ART-2017-09,
author = {Pascal Hirmer and Michael Behringer and Bernhard Mitschang},
title = {{Partial execution of Mashup Plans during modeling time}},
journal = {Computer Science - Research and Development},
publisher = {Springer Berlin Heidelberg},
pages = {1--12},
type = {Article in Journal},
month = {September},
year = {2017},
issn = {1865-2034},
doi = {10.1007/s00450-017-0388-x},
keywords = {Workflows; Modeling; BPEL; Partial execution; Data Mashups},
language = {English},
cr-category = {H.2.8 Database Applications,
H.3.0 Information Storage and Retrieval General,
E.1 Data Structures},
ee = {https://link.springer.com/article/10.1007%2Fs00450-017-0388-x},
contact = {pascal.hirmer@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2017-09&engl=1}
}
@article {ART-2017-07,
author = {Christian Weber and Jan K{\"o}nigsberger},
title = {{Industrie 4.0: Aktuelle Entwicklungen f{\"u}r Analytics - Teil 2: Vergleich und Bewertung von Industrie 4.0-Referenzarchitekturen}},
journal = {wt Werkstattstechnik online},
publisher = {Springer-VDI-Verlag},
volume = {107},
number = {6},
pages = {405--409},
type = {Article in Journal},
month = {June},
year = {2017},
keywords = {IT Architecture; Analytics; Edge Analytics; Big Data; Smart Manufacturing; Industrie 4.0; Industrial Internet},
language = {German},
cr-category = {H.4.0 Information Systems Applications General,
J.2 Physical Sciences and Engineering,
J.6 Computer-Aided Engineering},
ee = {http://www.werkstattstechnik.de/wt/currentarticle.php?data[article_id]=87256},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Die Verarbeitung gro{\ss}er Datenmengen sowie die Erkenntnis, dass Datenanalysen
eine hohe Relevanz haben, sind in den produzierenden Unternehmen angekommen.
Bekannte Anwendungsbeispiele sind Digital Mock-Up in der Produktentwicklung
oder Prozessoptimierung durch Predictive Maintenance. Die in letzter Zeit
entwickelten Referenzarchitekturen in diesen breitgef{\"a}cherten Themenfeldern
betrachten dementsprechend verschiedene Aspekte in unterschiedlichen
Auspr{\"a}gungen. Dieser aus zwei Teilen bestehende Beitrag rekapituliert und
bewertet diese Entwicklungen, um Unternehmen bei der Umsetzung ihrer eigenen
individuellen Architektur Hilfestellung zu geben. Im ersten Teil des Beitrags
(Ausgabe 3-2017: wt Werkstattstechnik online) wurden aktuelle
Referenzarchitekturen mit ihren Architekturbausteinen im Bereich Industrie 4.0
vorgestellt. In diesem zweiten Teil werden nun die Referenzarchitekturen unter
dem Gesichtspunkt der Themenfelder Analytics sowie Datenmanagement untersucht
und bewertet.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2017-07&engl=1}
}
@article {ART-2017-02,
author = {Christian Weber and Jan K{\"o}nigsberger},
title = {{Industrie 4.0: Aktuelle Entwicklungen f{\"u}r Analytics - Teil 1: Analytics und Datenmanagement in Industrie 4.0-Referenzarchitekturen}},
journal = {wt Werkstattstechnik online},
address = {D{\"u}sseldorf},
publisher = {Springer-VDI-Verlag},
volume = {107},
number = {3},
pages = {113--117},
type = {Article in Journal},
month = {March},
year = {2017},
keywords = {IT Architecture; Analytics; Edge Analytics; Big Data; Smart Manufacturing; Industrie 4.0; Industrial Internet},
language = {German},
cr-category = {H.4.0 Information Systems Applications General,
J.2 Physical Sciences and Engineering,
J.6 Computer-Aided Engineering},
ee = {http://www.werkstattstechnik.de/wt/currentarticle.php?data[article_id]=87256},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Die Verarbeitung gro{\ss}er Datenmengen sowie die hohe Relevanz von Datenanalysen
sind in den produzierenden Unternehmen mittlerweile angekommen. Bekannte
Anwendungsbeispiele sind Digital Mock-Up in der Produktentwicklung oder
Prozessoptimierung durch Predictive Maintenance. Die in letzter Zeit
entwickelten Referenzarchitekturen in diesen breitgef{\"a}cherten Themenfeldern
betrachten dementsprechend verschiedene Aspekte in unterschiedlichen
Auspr{\"a}gungen. Dieser aus zwei Teilen bestehende Fachbeitrag rekapituliert und
bewertet diese Entwicklungen, um Unternehmen bei der Umsetzung ihrer eigenen
individuellen Architektur Hilfestellung zu geben. Im Teil 1 werden aktuelle
Referenzarchitekturen mit ihren Architekturbausteinen im Bereich Industrie 4.0
vorgestellt. Im zweiten Teil (Ausgabe 6-2017 der wt Werkstattstechnik online)
werden die Referenzarchitekturen unter dem Gesichtspunkt der Themenfelder
Analytics sowie Datenmanagement untersucht und bewertet.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2017-02&engl=1}
}
@article {ART-2016-26,
author = {Uwe Breitenb{\"u}cher and Christian Endres and K{\'a}lm{\'a}n K{\'e}pes and Oliver Kopp and Frank Leymann and Sebastian Wagner and Johannes Wettinger and Michael Zimmermann},
title = {{The OpenTOSCA Ecosystem - Concepts \& Tools}},
journal = {European Space project on Smart Systems, Big Data, Future Internet - Towards Serving the Grand Societal Challenges - Volume 1: EPS Rome 2016},
publisher = {SciTePress},
pages = {112--130},
type = {Article in Journal},
month = {December},
year = {2016},
isbn = {978-989-758-207-3},
doi = {10.5220/0007903201120130},
keywords = {TOSCA; OpenTOSCA; Orchestration; Management; Cloud},
language = {English},
cr-category = {D.2.2 Software Engineering Design Tools and Techniques,
D.2.9 Software Engineering Management},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {Automating the provisioning and management of Cloud applications is one of the
most important issues in Cloud Computing. The Topology and Orchestration
Specification for Cloud Applications (TOSCA) is an OASIS standard for
describing Cloud applications and their management in a portable and
interoperable manner. TOSCA enables modeling the application's structure in the
form of topology models and employs the concept of executable management plans
to describe all required management functionality regarding the application. In
this paper, we give an overview of TOSCA and the OpenTOSCA Ecosystem, which is
an implementation of the TOSCA standard. The ecosystem consists of
standard-compliant tools that enable modeling application topology models and
automating the provisioning and management of the modeled applications.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-26&engl=1}
}
@article {ART-2016-23,
author = {Pascal Hirmer and Uwe Breitenb{\"u}cher and Ana Cristina Franco da Silva and K{\'a}lm{\'a}n K{\'e}pes and Bernhard Mitschang and Matthias Wieland},
title = {{Automating the Provisioning and Configuration of Devices in the Internet of Things}},
journal = {Complex Systems Informatics and Modeling Quarterly},
publisher = {Online},
volume = {9},
pages = {28--43},
type = {Article in Journal},
month = {December},
year = {2016},
doi = {10.7250/csimq.2016-9.02},
issn = {2255-9922},
keywords = {Internet of Things; sensors; actuators; digital twin; ontologies; TOSCA},
language = {English},
cr-category = {J.6 Computer-Aided Engineering,
H.3.1 Content Analysis and Indexing},
ee = {https://csimq-journals.rtu.lv/article/view/csimq.2016-9.02/pdf_8},
contact = {pascal.hirmer@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The Internet of Things benefits from an increasing number of interconnected
technical devices. This has led to the existence of so-called smart
environments, which encompass one or more devices sensing, acting, and
automatically performing different tasks to enable their self-organization.
Smart environments are divided into two parts: the physical environment and its
digital representation, oftentimes referred to as digital twin. However, the
automated binding and monitoring of devices of smart environments are still
major issues. In this article we present a method and system architecture to
cope with these challenges by enabling (i) easy modeling of sensors, actuators,
devices, and their attributes, (ii) dynamic device binding based on their type,
(iii) the access to devices using different paradigms, and (iv) the monitoring
of smart environments in regard to failures or changes. We furthermore provide
a prototypical implementation of the introduced approach.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-23&engl=1}
}
@article {ART-2016-19,
author = {Marina Bitsaki and Christos Koutras and George Koutras and Frank Leymann and Frank Steimle and Sebastian Wagner and Matthias Wieland},
title = {{ChronicOnline: Implementing a mHealth solution for monitoring and early alerting in chronic obstructive pulmonary disease}},
journal = {Health Informatics Journal},
publisher = {Sage Publications},
pages = {1--10},
type = {Article in Journal},
month = {April},
year = {2016},
doi = {10.1177/1460458216641480},
keywords = {chronic obstructive pulmonary disease; cloud computing; health services; mobile applications; monitoring},
language = {English},
cr-category = {C.2.4 Distributed Systems,
H.2.8 Database Applications,
J.3 Life and Medical Sciences},
ee = {http://jhi.sagepub.com/content/early/2016/04/16/1460458216641480.full.pdf+html},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {Lack of time or economic difficulties prevent chronic obstructive pulmonary
disease patients from communicating regularly with their physicians, thus
inducing exacerbation of their chronic condition and possible hospitalization.
Enhancing Chronic patients{\^a}€™ Health Online proposes a new, sustainable and
innovative business model that provides at low cost and at significant savings
to the national health system, a preventive health service for chronic
obstructive pulmonary disease patients, by combining human medical expertise
with state-of-the-art online service delivery based on cloud computing,
service-oriented architecture, data analytics, and mobile applications. In this
article, we implement the frontend applications of the Enhancing Chronic
patients' Health Online system and describe their functionality and the
interfaces available to the users.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-19&engl=1}
}
@article {ART-2016-18,
author = {Frank Steimle and Matthias Wieland and Bernhard Mitschang and Sebastian Wagner and Frank Leymann},
title = {{Extended provisioning, security and analysis techniques for the ECHO health data management system}},
journal = {Computing},
publisher = {Springer},
pages = {1--19},
type = {Article in Journal},
month = {October},
year = {2016},
doi = {10.1007/s00607-016-0523-8},
language = {English},
cr-category = {C.2.4 Distributed Systems,
H.2.8 Database Applications,
J.3 Life and Medical Sciences},
ee = {http://dx.doi.org/10.1007/s00607-016-0523-8},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {eHealth gains more and more interest since a lot of end-user devices supporting
health data capturing are available. The captured data has to be managed and
securely stored, in order to access it from different devices and share it with
other users such as physicians. The aim of the German-Greek research project
ECHO is to support the treatment of patients, who suffer from chronic
obstructive pulmonary disease, a chronic respiratory disease. Usually the
patients need to be examined by their physicians on a regular basis due to
their chronic condition. Since this is very time consuming and expensive we
developed an eHealth system which allows the physician to monitor the patients'
condition remotely, e.g., via smart phones. This article is an extension of
previous work, where we introduced a health data model and an associated
platform-architecture for the management and analysis of the data provided by
the patients. There we have also shown how the security of the data is ensured
and we explained how the platform can be provided in a cloud-based environment
using the OASIS standard TOSCA, which enables a self-contained management of
cloud-services. In this article we provide a more detailed description about
the health data analysis, provisioning and security aspects of the eHealth
system.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-18&engl=1}
}
@article {ART-2016-16,
author = {Mathias Mormul and Pascal Hirmer and Matthias Wieland and Bernhard Mitschang},
title = {{Situation model as interface between situation recognition and situation-aware applications}},
journal = {Computer Science - Research and Development},
publisher = {Springer Berlin Heidelberg},
pages = {1--12},
type = {Article in Journal},
month = {November},
year = {2016},
doi = {10.1007/s00450-016-0335-2},
keywords = {Situation; Situation-awareness; Data management; Internet of things; Context; Context-awareness},
language = {English},
cr-category = {J.6 Computer-Aided Engineering,
H.3.1 Content Analysis and Indexing},
ee = {http://link.springer.com/article/10.1007/s00450-016-0335-2},
contact = {pascal.hirmer@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The upcoming of internet of things draws interest of many companies and leads
to the creation of smart environments. The foundation necessary for this
purpose lies in the integration of sensors, which continuously provide context
data of their environment. Based on this context, changes of state in the
environment, i.e., situations, can be detected. However, with the huge amount
of heterogeneous context and its processing, new challenges arise.
Simultaneously, the dynamic behavior of the environment demands automated
mechanisms for applications to adapt to the situations automatically and in a
timely manner. To meet this challenge, we present (1) the situation model as a
data model for integrating all data related to situation recognition, and (2)
the management and provisioning of situations based on this situation model to
further decouple situation recognition and applications adapting to recognized
situations. Furthermore, we present a prototypical implementation of the
situation model and its management.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-16&engl=1}
}
@article {ART-2016-14,
author = {Ana Cristina Franco da Silva and Pascal Hirmer and Matthias Wieland and Bernhard Mitschang},
title = {{SitRS XT -- Towards Near Real Time Situation Recognition}},
journal = {Journal of Information and Data Management},
publisher = {-},
volume = {7},
number = {1},
pages = {4--17},
type = {Article in Journal},
month = {April},
year = {2016},
keywords = {Complex Event Processing; Internet of Things; Situation-awareness; Situation Recognition},
language = {English},
cr-category = {H.3 Information Storage and Retrieval,
I.5 Pattern Recognition},
ee = {https://seer.lcc.ufmg.br/index.php/jidm/article/view/2109},
contact = {franco-da-silva@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Nowadays, the Internet of Things gains more and more attention through cheap,
highly interconnected hardware devices that are attached with sensors and
actuators. This results in an instrumented environment that provides sufficient
context information to drive what is called situation recognition. Situations
are derived from large amounts of context data, which is difficult to handle.
In this article, we present SitRS XT, an extension of our previously introduced
situation recognition service SitRS, to enable situation recognition in near
real time. SitRS XT provides easy to use situation recognition based on Complex
Event Processing, which is highly efficient. The architecture and method of
SitRS XT is described and evaluated through a prototypical implementation.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-14&engl=1}
}
@article {ART-2016-13,
author = {Pascal Hirmer and Bernhard Mitschang},
title = {{TOSCA4Mashups: enhanced method for on-demand data mashup provisioning}},
journal = {Computer Science - Research and Development},
publisher = {Springer Berlin Heidelberg},
pages = {1--10},
type = {Article in Journal},
month = {October},
year = {2016},
doi = {10.1007/s00450-016-0330-7},
keywords = {Data Mashups; TOSCA; Provisioning; Cloud Computing},
language = {English},
cr-category = {E.0 Data General,
E.1 Data Structures,
H.1 Models and Principles},
ee = {http://link.springer.com/article/10.1007/s00450-016-0330-7},
contact = {Pascal.Hirmer@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Nowadays, the amount of data increases tremendously. Extracting information and
generating knowledge from this data is a great challenge. To cope with this
issue -- oftentimes referred to as big data problem -- we need effective means
for efficient data integration, data processing, and data analysis. To enable
flexible, explorative and ad-hoc data processing, several data mashup
approaches and tools have been developed in the past. One of these tools is
FlexMash – a data mashup tool developed at the University of Stuttgart. By
offering domain-specific graphical modeling as well as a pattern-based
execution, FlexMash enables usage by a wide range of users, both domain experts
and technical experts. The core idea of FlexMash is a flexible execution of
data mashups using different, user-requirement-dependent execution components.
In this paper, we present a new approach for on-demand, automated provisioning
of these components in a cloud computing environment using the Topology and
Orchestration Specification for Cloud Applications. This enables many
advantages for mashup execution such as scalability, availability and cost
savings.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-13&engl=1}
}
@article {ART-2016-12,
author = {Pascal Hirmer and Matthias Wieland and Holger Schwarz and Bernhard Mitschang and Uwe Breitenb{\"u}cher and Santiago G{\'o}mez S{\'a}ez and Frank Leymann},
title = {{Situation recognition and handling based on executing situation templates and situation-aware workflows}},
journal = {Computing},
publisher = {Springer},
pages = {1--19},
type = {Article in Journal},
month = {October},
year = {2016},
doi = {10.1007/s00607-016-0522-9},
keywords = {Situation Recognition; IoT; Context; Integration; Cloud Computing; Workflows; Middleware},
language = {English},
cr-category = {J.6 Computer-Aided Engineering,
H.3.1 Content Analysis and Indexing},
ee = {http://dx.doi.org/10.1007/s00607-016-0522-9},
contact = {pascal.hirmer@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Today, the Internet of Things has evolved due to an advanced interconnectivity
of hardware devices equipped with sensors and actuators. Such connected
environments are nowadays well-known as smart environments. Famous examples are
smart homes, smart cities, and smart factories. Such environments should only
be called ``smart'' if they allow monitoring and self-organization. However, this
is a great challenge: (1) sensors have to be bound and sensor data have to be
efficiently provisioned to enable monitoring of these environments, (2)
situations have to be detected based on sensor data, and (3) based on the
recognized situations, a reaction has to be triggered to enable
self-organization, e.g., through notification delivery or the execution of
workflows. In this article, we introduce SitOPT---an approach for situation
recognition based on raw sensor data and automated handling of occurring
situations through notification delivery or execution of situation-aware
workflows. This article is an extended version of the paper ``SitRS - Situation
Recognition based on Modeling and Executing Situation Templates'' presented at
the 9th Symposium and Summer School of Service-oriented Computing 2015.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-12&engl=1}
}
@article {ART-2016-10,
author = {Christoph Hochreiner and Stefan Schulte and Oliver Kopp},
title = {{Bericht zum 8. ZEUS Workshop}},
journal = {Softwaretechnik-Trends},
publisher = {Online},
volume = {36},
number = {2},
pages = {61--62},
type = {Article in Journal},
month = {August},
year = {2016},
issn = {0720-8928},
language = {German},
cr-category = {H.4.1 Office Automation},
ee = {http://pi.informatik.uni-siegen.de/gi/stt/36_2/03_Technische_Beitraege/ZEUS2016/bericht_zeus.pdf,
http://pi.informatik.uni-siegen.de/gi/stt/36_2/index.html},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Es wird {\"u}ber den 8. ZEUS Workshop in Wien im Speziellen und dem ZEUS Workshop
als Konzept im Allgemeinen berichtet.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-10&engl=1}
}
@article {ART-2016-06,
author = {Christoph Gr{\"o}ger and Christoph Stach and Bernhard Mitschang and Engelbert Westk{\"a}mper},
title = {{A mobile dashboard for analytics-based information provisioning on the shop floor}},
journal = {International Journal of Computer Integrated Manufacturing},
publisher = {Taylor \& Francis Inc.},
pages = {1--20},
type = {Article in Journal},
month = {May},
year = {2016},
doi = {10.1080/0951192X.2016.1187292},
keywords = {dashboard; cockpit; process optimisation; data analytics; business intelligence; data mining},
language = {English},
cr-category = {H.4.0 Information Systems Applications General,
J.2 Physical Sciences and Engineering},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Today's turbulent global environment requires agility and flexibility of
manufacturing companies to stay competitive. Thus, employees have to monitor
their performance continuously and react quickly to turbulences which demands
real-time information provisioning across all hierarchy levels. However,
existing manufacturing IT systems, for example, manufacturing execution systems
(MES), do hardly address information needs of individual employees on the shop
floor. Besides, they do not exploit advanced analytics to generate novel
insights for process optimisation. To address these issues, the operational
process dashboard for manufacturing (OPDM) is presented, a mobile
data-mining-based dashboard for workers and supervisors on the shop floor. It
enables proactive optimisation by providing analytical information anywhere and
anytime in the factory. In this paper, first, user groups and conceptual
dashboard services are defined. Then, IT design issues of a mobile shop floor
application on top of the advanced manufacturing analytics platform are
investigated in order to realise the OPDM. This comprises the evaluation of
different types of mobile devices, the development of an appropriate context
model and the investigation of security issues. Finally, an evaluation in an
automotive industry case is presented using a prototype in order to demonstrate
the benefits of the OPDM for data-driven process improvement and agility in
manufacturing.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-06&engl=1}
}
@article {ART-2015-10,
author = {Cornelia Kiefer and Ulrike Pado},
title = {{Freitextaufgaben in Online-Tests – Bewertung und Bewertungsunterst{\"u}tzung}},
journal = {HMD Praxis der Wirtschaftsinformatik},
publisher = {Springer},
volume = {52},
number = {1},
pages = {96--107},
type = {Article in Journal},
month = {January},
year = {2015},
doi = {10.1365/s40702-014-0104-2},
language = {German},
cr-category = {J Computer Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Der Einsatz von eLearning-Szenarien bietet viele innovative M{\"o}glichkeiten f{\"u}r
die Wissensvermittlung. Spezielle eLearning-Tools dienen dazu, Lernressourcen,
interaktive Elemente sowie Interaktions- und Kommunikationsm{\"o}glichkeiten
bereitzustellen und zu kombinieren. So wird selbstgesteuertes, asynchrones
Lernen m{\"o}glich, methodisch erschlie{\ss}en sich neue Wege und hohe Aufw{\"a}nde f{\"u}r
gro{\ss}e Lerngruppen k{\"o}nnen sinken. In diesem Zusammenhang stellt sich die Frage,
welchen Nutzen die computergest{\"u}tzte Umsetzung von Lernstands{\"u}berpr{\"u}fungen
(Tests und Klausuren) f{\"u}r Dozenten und Lernende haben kann. Stark assoziiert
mit Tests im eLearning-Bereich sind Multiple-Choice-Aufgaben. Als automatisch
korrigierbare Fragen k{\"o}nnen sie im eLearning-Umfeld schnell und objektiv
bewertet werden und liefern auch bei gro{\ss}en Teilnehmerzahlen schnell Feedback
an Lernende und Dozenten. Gleichzeitig zweifeln viele Dozenten daran, dass
diese Frageform die geforderten Kenntnisse und F{\"a}higkeiten wirklich
widerspiegeln und bef{\"u}rchten ungerechtfertigte Erfolge durch Raten.
Freitextfragen umgehen diese Probleme und bieten den Mehrwert einer klareren
Einsicht in die Denkweise des Pr{\"u}flings, doch ist ihre Korrektur zeitaufw{\"a}ndig
und oft subjektiv. Wir geben Hinweise f{\"u}r die Praxis, die die Bewertung von
Freitextaufgaben verbessern und beschleunigen helfen, und illustrieren unsere
{\"U}berlegungen an einem realen Datensatz von Freitextfragen und Antworten, der im
Verlauf einer Einf{\"u}hrungsveranstaltung in die Programmierung f{\"u}r Informatiker
und Wirtschaftsinformatiker gewonnen wurde. Abschlie{\ss}end stellen wir unsere
noch andauernde Arbeit an einem System zur halbautomatischen
Bewerterunterst{\"u}tzung vor, das vom computerbasierten Umfeld im
eLearning-Bereich profitiert und sowohl den Zeitaufwand f{\"u}r die manuelle
Bewertung als auch die Replizierbarkeit der Bewertungen weiter optimieren soll.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2015-10&engl=1}
}
@article {ART-2015-05,
author = {Christoph Stach and Bernhard Mitschang},
title = {{Der Secure Data Container (SDC)}},
journal = {Datenbank-Spektrum},
address = {Berlin, Heidelberg},
publisher = {Springer Verlag},
volume = {15},
number = {2},
pages = {109--118},
type = {Article in Journal},
month = {July},
year = {2015},
issn = {1618-2162},
doi = {10.1007/s13222-015-0189-y},
keywords = {Datenschutz; Schutzziele; PMP-Erweiterung; Datencontainer; Evaluation},
language = {German},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
D.4.6 Operating Systems Security and Protection},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Mobile Endger{\"a}te wurden zu Marc Weisers Computer des 21. Jahrhunderts, da sie
als dauerhaft verf{\"u}gbare Informationsquelle Einzug in unseren Alltag gehalten
haben. Auf ihnen treffen private Daten (z.B. Fotos) auf Kontextdaten (z.B.
Standortdaten); verkn{\"u}pft stellen diese ein immenses Sicherheitsrisiko dar. Wie
eine Vielzahl an Datendiebst{\"a}hlen belegt, reichen die existierenden
Datensicherheitssysteme f{\"u}r Mobilplattformen bei weitem nicht aus. Daher bedarf
es einer Identifikation m{\"o}glicher Angriffsvektoren sowie einer Analyse der
speziellen Schutzziele eines solchen Systems. Darauf basierend wird die Privacy
Management Platform, ein Berechtigungssystem, mithilfe des neu eingef{\"u}hrten
Secure Data Containers zu einem ganzheitlichen Datensicherheitssystem
erweitert. Dabei zeigt sich, dass diese Kombination alle Schutzziele erf{\"u}llt
und dennoch hochperformant ist. Obwohl die vorgestellten Prototypen auf Android
basieren, ist das Konzept auch auf andere App-Plattformen anwendbar.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2015-05&engl=1}
}
@article {ART-2015-03,
author = {Stefan Silcher and Erwin Gro{\ss} and Jan K{\"o}nigsberger and J{\"o}rg Siegert and Michael Lickefett and Thomas Bauernhansl and Bernhard Mitschang},
title = {{Mobile Fabriklayoutplanung}},
journal = {wt Werkstattstechnik online},
address = {D{\"u}sseldorf},
publisher = {Springer-VDI-Verlag},
volume = {105},
number = {3},
pages = {96--101},
type = {Article in Journal},
month = {March},
year = {2015},
language = {German},
cr-category = {J.6 Computer-Aided Engineering,
H.4.2 Information Systems Applications Types of Systems},
ee = {http://www.werkstattstechnik.de/wt/currentarticle.php?data[article_id]=82746},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Eine stetig steigende Produktvielfalt sowie ein sich schnell {\"a}nderndes
Marktumfeld erfordern neue Methoden der Planung, um auf diese Komplexit{\"a}t der
M{\"a}rkte angemessen zu reagieren. Einen wichtigen Stellhebel bildet dabei die
Wandlungsf{\"a}higkeit, die durch geeignete Planungsmethoden unterst{\"u}tzt und
optimiert wird. Gerade in der Layoutplanung k{\"o}nnen geeignete Methoden und
Systeme zu einem Wettbewerbsvorteil f{\"u}hren. Dieser Fachartikel geht am
Beispiel der Lernfabrik {\glqq}advanced Industrial Engineering{\grqq} (aIE) und einer
f{\"u}r das Android-Betriebssystem entwickelten App zur Layoutplanung auf die
mobile Layoutplanung {\glqq}vor Ort{\grqq} in der Fabrik ein.
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2015-03&engl=1}
}
@article {ART-2014-08,
author = {Uwe Breitenb{\"u}cher and Tobias Binz and Christoph Fehling and Oliver Kopp and Frank Leymann and Matthias Wieland},
title = {{Policy-Aware Provisioning and Management of Cloud Applications}},
journal = {International Journal On Advances in Security},
publisher = {Xpert Publishing Services},
volume = {7},
number = {1\&2},
pages = {15--36},
type = {Article in Journal},
month = {June},
year = {2014},
issn = {1942-2636},
keywords = {Cloud Computing; Application Management; Provisioning; Security; Policies},
language = {English},
cr-category = {D.2.7 Software Engineering Distribution, Maintenance, and Enhancement,
D.2.9 Software Engineering Management,
K.6 Management of Computing and Information Systems,
K.6.3 Software Management,
K.6.5 Security and Protection},
ee = {http://thinkmind.org/index.php?view=article&articleid=sec_v7_n12_2014_2},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {The automated provisioning and management of composite Cloud applications is a
major issue and of vital importance in Cloud Computing. It is key to enable
properties such as elasticity and pay-per-use. The functional aspects of
provisioning and management such as instantiating virtual machines or updating
software components are covered by various technologies on different technical
levels. However, currently available solutions are tightly coupled to
individual technologies without being able to consider non-functional security
requirements in a non-proprietary and interoperable way. In addition, due to
their heterogeneity, the integration of these technologies in order to benefit
from their individual strengths is a major problem - especially if
non-functional aspects have to be considered and integrated, too. In this
article, we present a concept that enables executing management tasks using
different heterogeneous management technologies in compliance with
non-functional security requirements specified by policies. We extend the
Management Planlet Framework by a prototypical implementation of the concept
and evaluate the approach by several case studies.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2014-08&engl=1}
}
@article {ART-2014-06,
author = {Tobias Binz and Uwe Breitenb{\"u}cher and Oliver Kopp and Frank Leymann},
title = {{Migration of enterprise applications to the cloud}},
journal = {it - Information Technology, Special Issue: Architecture of Web Application},
publisher = {De Gruyter},
volume = {56},
number = {3},
pages = {106--111},
type = {Article in Journal},
month = {May},
year = {2014},
doi = {10.1515/itit-2013-1032},
issn = {1611-2776},
keywords = {Migration; Cloud},
language = {English},
cr-category = {K.6 Management of Computing and Information Systems,
D.2.7 Software Engineering Distribution, Maintenance, and Enhancement},
contact = {Tobias Binz, http://www.iaas.uni-stuttgart.de/institut/mitarbeiter/binz},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {The migration of existing applications to the cloud enables enterprises to
preserve their previous investments and - at the same time - to benefit from
the properties of the cloud. This article presents a semi-automated approach
for migrating existing enterprise applications to the cloud. Thereby,
information about the application is gathered in the source environment, the
application is extracted, transformed, and cloud-enabled. This makes the
application ready for provisioning in the target cloud. Cloud-enabling an
application preserves its business functionality and does not},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2014-06&engl=1}
}
@article {ART-2014-04,
author = {Cataldo Mega and Tim Waizenegger and David Lebutsch and Stefan Schleipen and J.M. Barney},
title = {{Dynamic cloud service topology adaption for minimizing resources while meeting performance goals}},
journal = {IBM Journal of Research and Development},
publisher = {IBM},
volume = {58},
number = {2},
pages = {1--10},
type = {Article in Journal},
month = {March},
year = {2014},
doi = {10.1147/JRD.2014.2304771},
issn = {0018-8646},
keywords = {Cloud computing; Electronic countermeasures; Network topology; Optimization; Resource management; Service agreements; Time measurement},
language = {English},
cr-category = {H.2 Database Management,
H.3 Information Storage and Retrieval,
D.4.8 Operating Systems Performance},
contact = {tim.waizenegger@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Even in the cloud computing era, meeting service-level agreements (SLAs) of a
computing service or application while significantly reducing the total cost of
ownership (TCO) remains a challenge. Cloud and software defined environments
(SDEs) are offering new opportunities for how resources can be utilized to an
even higher degree than before---which leads to a reduced TCO for service
providers and customers of a service. The traditional method of meeting an SLA
is to assess peak workloads and size a system accordingly. This still leads to
very low average compute resource utilization rates. This paper presents a
novel dynamic and cost-efficient orchestration approach of multi-tenant
capable, software defined system topologies based on a
monitor-analyze-plan-execute (MAPE) concept. We present the mechanism involved
in creating and applying these heuristics and show the results for a
cloud-based enterprise content management (ECM) solution. Our approach allows
the cloud provider to minimize its resource requirements while staying in
compliance with SLAs.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2014-04&engl=1}
}
@article {ART-2013-09,
author = {Sylvia Radesch{\"u}tz and Holger Schwarz and Florian Niedermann},
title = {{Business impact analysis --- a framework for a comprehensive analysis and optimization of business processes}},
journal = {Computer Science - Research and Development},
publisher = {Springer},
pages = {1--18},
type = {Article in Journal},
month = {September},
year = {2013},
language = {English},
cr-category = {H.2 Database Management},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The ability to continuously adapt its business processes is a crucial ability
for any company in order to survive in todays dynamic world. In order to
accomplish this task, a company needs to profoundly analyze all its business
data. This generates the need for data integration and analysis techniques that
allow for a comprehensive analysis.
A particular challenge when conducting this analysis is the integration of
process data generated by workflow engines and operational data that is
produced by business applications and stored in data warehouses. Typically,
these two types of data are not matched as their acquisition and analysis
follows different principles, i.e., a process-oriented view versus a view
focusing on business objects.
To address this challenge, we introduce a framework that allows to improve
business processes considering an integrated view on process data and
operational data.We present and evaluate various architectural options for the
data warehouse that provides this integrated view based on a specialized
federation layer. This integrated view is also reflected in a set of operators
that we introduce. We show how these operators ease the definition of analysis
queries and how they allow to extract hidden optimization patterns by using
data mining techniques.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2013-09&engl=1}
}
@article {ART-2012-24,
author = {Christoph Stach},
title = {{Gamework – A Framework Approach for Customizable Pervasive Applications}},
journal = {International Journal of Computer Information Systems and Industrial Management Applications},
publisher = {MIR Labs},
volume = {4},
pages = {66--75},
type = {Article in Journal},
month = {July},
year = {2012},
issn = {2150-7988},
keywords = {Mobile Services; Pervasive Multi-player Games; Customizable Framework; Data Analysis and Improvement.},
language = {English},
cr-category = {D.3.3 Programming Language Constructs and Features,
D.2.13 Software Engineering Reusable Software,
K.8 Personal Computing},
contact = {Senden Sie eine E-Mail an christoph.stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The number of pervasive games is growing. In a pervasive game players have to
interact with their environment in order to control their avatar. While in the
past such a game required immense hardware equipment, nowadays, Smartphones
have all required functionality built-in already. In addition, there is an
upcoming trend towards software, supported with new content and knowledge by an
active community. By combining these two trends, a new genre for computer games
arises, targeting not only established gamers but also a new audience. In this
paper we present a framework for pervasive games that support various
customization techniques. Therefore, we divided the techniques into four
classes depending on the player's technical knowledge and scale of adaption
potential. Further, we present two customizable pervasive games we have
implemented using the framework. Concluding, we show the potential of these
games as well as the potential of our framework. We also report on early
experiences in exploiting the customization approach of our framework.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2012-24&engl=1}
}
@article {ART-2012-19,
author = {Jorge Minguez and David Baureis and Donald Neumann},
title = {{A reference architecture for agile product-service systems}},
journal = {CIRP Journal of Manufacturing Science and Technology},
publisher = {Elsevier},
volume = {5},
number = {4},
pages = {319--327},
type = {Article in Journal},
month = {January},
year = {2012},
issn = {1755-5817},
doi = {10.1016/j.cirpj.2012.09.007},
keywords = {Service, Integration, Service-oriented architecture (SOA), Product-service system (PSS), Industrial product-service systems (IPS$^{2}$), Agility},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
D.2.13 Software Engineering Reusable Software},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In times of globalization and saturated markets, manufacturing companies are
forced to find new possibilities of differentiation against competitors.
Product-service systems (PSS) are a strategic approach that offers this
possibility by integrating goods and services. The implementation of a PSS
entails challenges for the resulting supply chain structure and the IT
infrastructure supporting coordinated service offerings, such as conflicting
goals and coordination in the integrated business processes. The
service-oriented architecture (SOA) paradigm, based on loosely coupled
components, provides rapid reconfiguration of business processes, rapid
integration of services and goal definition through service level agreements.
This paper presents a reference architecture based on SOA to support
coordination and definition of goals in heterogeneous supply chains.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2012-19&engl=1}
}
@article {ART-2012-12,
author = {Nazario Cipriani and Oliver D{\"o}rler and Bernhard Mitschang},
title = {{Sicherer Zugriff und sichere Verarbeitung von Kontextdatenstr{\"o}men in einer verteilten Umgebung}},
journal = {Datenbank-Spektrum ``Data Streams and Event Processing''},
publisher = {dpunkt.verlag},
volume = {12},
number = {1},
pages = {13--22},
type = {Article in Journal},
month = {March},
year = {2012},
language = {German},
cr-category = {K.6.5 Security and Protection,
D.4.6 Operating Systems Security and Protection},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Technologischer Fortschritt im Bereich der Mikroelektronik und
Kommunikationstechnik f{\"u}hren zunehmend zu einem stark vernetzten, mit Sensoren
ausgestatteten Umfeld. Die damit einhergehende stetig steigende Anzahl an
Sensorinformationen, deren Daten in Form von Datenstr{\"o}men bereitgestellt
werden, erm{\"o}glichen neue Anwendungsszenarien und treiben neue
Verarbeitungstechniken. Im Kontext der sich verst{\"a}rkenden Durchdringung des
allt{\"a}glichen Lebens mit sozialen Medien und der gleichzeitigen Auswertung von
beispielsweise Positionsinformationen, w{\"a}chst die Bedeutung der
Zugriffskontrolle auf Information. Die Herausforderung in diesem Zusammenhang
besteht darin, Mechanismen zur Verf{\"u}gung zu stellen, die eine Regelung des
Datenzugriffs erm{\"o}glichen und die Datenstromverarbeitung effizient und
flexibel unterst{\"u}tzen.
Diese Arbeit stellt ein flexibles Rahmenwerk zur sicheren Verarbeitung von
Kontextdaten vor, das es Anbietern von Daten in Datenstromverarbeitungssystemen
erm{\"o}glicht, den Zugriff und die Verarbeitung sch{\"u}tzenswerter Daten zu
kontrollieren. Hierbei erm{\"o}glicht das vorgestellte Konzept im Gegensatz zu
bisherigen Konzepten insbesondere den feingranularen Zugriff auf Kontextdaten.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2012-12&engl=1}
}
@article {ART-2011-19,
author = {Bernhard Mitschang and Holger Schwarz},
title = {{Der Lehrstuhl ``Datenbanken und Informationssysteme'' an der Universit{\"a}t Stuttgart stellt sich vor}},
journal = {Datenbank-Spektrum},
publisher = {Springer},
volume = {11},
number = {3},
pages = {213--217},
type = {Article in Journal},
month = {November},
year = {2011},
language = {German},
cr-category = {H.2 Database Management},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In diesem Beitrag stellen wir den Lehrstuhl f{\"u}r Datenbanken und
Informationssysteme der Universit{\"a}t Stuttgart unter der Leitung von Prof. Dr.
Bernhard Mitschang vor. Nach einem {\"U}berblick {\"u}ber die Forschungsschwerpunkte
des Lehrstuhls gehen wir auf ausgew{\"a}hlte aktuelle Forschungsprojekte ein und
erl{\"a}utern die Beteiligung an der Lehre in Bachelor- und Masterstudieng{\"a}ngen.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2011-19&engl=1}
}
@article {ART-2011-14,
author = {Peter Reimann and Holger Schwarz and Bernhard Mitschang},
title = {{Design, Implementation, and Evaluation of a Tight Integration of Database and Workflow Engines}},
journal = {Journal of Information and Data Management},
editor = {Alberto H. F. Laender and Mirella M. Moro},
publisher = {SBC - Brazilian Computer Society},
volume = {2},
number = {3},
pages = {353--368},
type = {Article in Journal},
month = {October},
year = {2011},
issn = {2178-7107},
keywords = {Data-Intensive Workflow; Improved Local Data Processing; Scientific Workflow; Simulation Workflow},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
H.2.8 Database Applications,
H.4.1 Office Automation},
contact = {Peter Reimann Peter.Reimann@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Accessing and processing huge amounts of heterogeneous and distributed data are
some of the major challenges of data-intensive workflows. Traditionally, the
descriptions of such workflows focus on their data flow. Nevertheless,
control-flow-oriented workflow languages are increasingly adapted to the needs
of data-intensive workflows. This provides a common level of abstraction for
both data-intensive workflows and classical orchestration workflows, e.g.,
business workflows, which then enables a comprehensive optimization across all
workflows. However, the problem still remains that workflows described in
control-flow-oriented languages tend to be less efficient for data-intensive
processes compared to specialized data-flow-oriented approaches. In this paper,
we propose a new kind of optimization targeted at data-intensive workflows that
are described in control-flow-oriented languages. We show how to improve
efficiency of such workflows by introducing various techniques that partition
the local data processing tasks to be performed during workflow execution in an
improved way. These data processing tasks are either assigned to the workflow
engine or to the tightly integrated local database engine. We evaluate the
effectiveness of these techniques by means of various test scenarios.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2011-14&engl=1}
}
@article {ART-2011-12,
author = {Jorge Minguez and Stefan Silcher and Philipp Riffelmacher and Bernhard Mitschang},
title = {{A Service Bus Architecture for Application Integration in the Planning and Production Phases of a Product Lifecycle}},
journal = {International Journal of Systems and Service-Oriented Engineering},
publisher = {IGI Global},
volume = {2},
number = {2},
pages = {21--36},
type = {Article in Journal},
month = {June},
year = {2011},
issn = {1947-3052},
keywords = {Manufacturing Service Bus; Service-oriented Architecture; Product Lifecycle Management; SOA; MSB; PLM},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
D.2.13 Software Engineering Reusable Software},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Constantly changing business conditions require a high level of flexibility in
business processes as well as an adaptive and fully interoperable IT
infrastructure in today's manufacturing environments. The lack of flexibility
prevents manufacturing companies from improving their responsiveness and
adapting their workflows to turbulent scenarios. In order to achieve highly
flexible and adaptive workflows, information systems in digital factories and
shop floors need to be integrated. The most challenging problem in such
manufacturing environments is the high heterogeneity of the IT landscape, where
the integration of legacy systems and information silos has led to chaotic
architectures over the last two decades. In order to overcome this issue, the
authors present a flexible integration platform that allows a loose coupling of
distributed services in event-driven manufacturing environments. The proposed
approach enables a flexible communication between digital factory and shop
floor components by introducing a service bus architecture. This solution
integrates an application-independent canonical message format for
manufacturing events, content-based routing and transformation services as well
as event processing workflows.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2011-12&engl=1}
}
@article {ART-2011-07,
author = {Holger Schwarz},
title = {{Generierung des Datenzugriffs in Anwendungsprogrammen: Anwendungsbereiche und Implementierungstechniken}},
journal = {Datenbank Spektrum},
address = {Heidelberg},
publisher = {Springer},
volume = {11},
number = {1},
pages = {5--14},
type = {Article in Journal},
month = {April},
year = {2011},
language = {German},
cr-category = {H.4 Information Systems Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Datenzugriffe auf externe und heterogene Datenbest{\"a}nde sind ein grundlegender
Bestandteil von Anwendungsprogrammen in ganz unterschiedlichen
Anwendungsbereichen. Vielfach k{\"o}nnen diese Datenzugriffe nicht {\"u}ber statisch
eingebettete Anweisungen realisiert werden, sondern m{\"u}ssen dynamisch generiert
werden. In diesem Beitrag wird das Spektrum relevanter Anwendungsbereiche
vorgestellt. Ausgehend von einzelnen Systembeispielen werden wichtige Aspekte
anfragegenerierender Systeme verallgemeinert. Hierzu wird eine
Systemklassifikation vorgestellt und die Bedeutung der Klassifikation
insbesondere f{\"u}r Optimierungsaspekte erl{\"a}utert. Ferner werden drei grundlegende
Implementierungskonzepte f{\"u}r anfragegenerierende Systeme vorgestellt und deren
Eignung f{\"u}r einzelne Anwendungsklassen diskutiert.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2011-07&engl=1}
}
@article {ART-2011-04,
author = {Jorge Minguez and Philipp Riffelmacher and Bernhard Mitschang and Engelbert Westk{\"a}mper},
title = {{Servicebasierte Integration von Produktionsanwendungen}},
journal = {Werkstattstechnik online},
publisher = {Springer-VDI Verlag},
volume = {3-2011},
pages = {128--133},
type = {Article in Journal},
month = {March},
year = {2011},
keywords = {service-oriented architecture; SOA; ESB; manufacturing; produktion; lernfabrik; produktionsanwendungen; servicebasierte integration},
language = {German},
cr-category = {D.2.11 Software Engineering Software Architectures,
D.2.13 Software Engineering Reusable Software},
ee = {http://www.technikwissen.de/wt/currentarticle.php?data[article_id]=59574},
contact = {jorge.minguez@gsame.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In einem modernen Produktionsumfeld soll es m{\"o}glich sein,
informationstechnische Prozesse an die sich zunehmend {\"a}ndernden
Gesch{\"a}ftsbedingungen anzupassen. Um eine schnelle Anpassung zu realisieren, ist
eine flexible Integration unterschiedlicher Informationssysteme erforderlich,
da die Informationsfl{\"u}sse durch system{\"u}bergreifende Datenbearbeitungsprozesse
gesteuert werden. Die heterogene Landschaft der digitalen Werkzeuge stellt
dabei eine enorme Herausforderung dar. Der vorgestellte servicebasierte Ansatz
adressiert diese Problematik.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2011-04&engl=1}
}
@article {ART-2011-03,
author = {Nazario Cipriani and Matthias Wieland and Matthias Grossmann and Daniela Nicklas},
title = {{Tool support for the design and management of context models}},
journal = {Information Systems},
editor = {Gottfried Vossen and Tadeusz Morzy},
address = {Oxford, UK, UK},
publisher = {Elsevier Science Ltd.},
volume = {36},
number = {1},
pages = {99--114},
type = {Article in Journal},
month = {March},
year = {2011},
issn = {0306-4379},
language = {English},
cr-category = {H.2.4 Database Management Systems},
ee = {http://www.sciencedirect.com/science?_ob=PublicationURL&_tockey=%23TOC%235646%232011%23999639998%232475749%23FLA%23&_cdi=5646&_pubType=J&_auth=y&_acct=C000022964&_version=1&_urlVersion=0&_userid=479010&md5=90fcaef40ac5285da3d69e894c214388,
http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6V0G-50GMMMG-4-1K&_cdi=5646&_user=479010&_pii=S0306437910000669&_origin=browse&_zone=rslt_list_item&_coverDate=03%2F31%2F2011&_sk=999639998&wchp=dGLbVtb-zSkzk&md5=aac6f0561c2464d528bcce117970acff&ie=/sdarticle.pdf},
department = {University of Stuttgart, Institute of Architecture of Application Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {A central task in the development of context-aware applications is the modeling
and management of complex context information. In this paper, we present the
NexusEditor, which can ease this task by providing a graphical user interface
to design schemas for spatial and technical context models, interactively
create queries, send them to a server and visualize the results. One main
contribution is to show how schema awareness can improve such a tool: The
NexusEditor dynamically parses the underlying data model and provides
additional syntactic checks, semantic checks, and short-cuts based on the
schema information. Furthermore, the tool helps to design new schema
definitions based on the existing ones, which is crucial for an iterative and
user-centric development of context-aware applications. Finally, it provides
interfaces to existing information spaces and visualization tools for spatial
data like GoogleEarth.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2011-03&engl=1}
}
@article {ART-2009-30,
author = {Nicola H{\"o}nle and Matthias Grossmann and Daniela Nicklas and Bernhard Mitschang},
title = {{Design and implementation of a domain-aware data model for pervasive context information}},
journal = {Computer Science Research + Development},
publisher = {Springer},
volume = {24},
number = {1-2},
pages = {69--83},
type = {Article in Journal},
month = {September},
year = {2009},
language = {English},
cr-category = {H.2.1 Database Management Logical Design,
H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {We introduce a data model for a context-management middleware that enables
context-aware and pervasive computing applications to transparently access
available data providers and that effectively combines their data. Our approach
supports new data fusion concepts for overlapping and heterogeneous data sets
and thus maximizes the information presented to the application. The main part
of our data model is a flexible concept for meta data that is able to represent
important aspects like quality, data derivation, or temporal characteristics of
data. Attributes having multiple values are utilized to represent sensor
measurements histories like locations of mobile objects at different points in
time. In our paper, we characterize the requirements for our data model and
show that existing data models, including the (object-) relational data model
and standard XML data models, do not offer the required flexibility. Therefore
basic XML technology is extended to support the necessary meta data concept and
multiply typed objects.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2009-30&engl=1}
}
@article {ART-2009-09,
author = {Jing Lu and Bernhard Mitschang},
title = {{Enforcing Data Consistency in Data Integration Systems by XQuery Trigger Service}},
journal = {International Journal of Web Information Systems},
publisher = {Emerald Group Publishing Limited},
volume = {5},
number = {2},
pages = {1--19},
type = {Article in Journal},
month = {May},
year = {2009},
language = {English},
cr-category = {E.0 Data General},
contact = {jinglu76@gmail.com},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Nowadays XML-based data integration systems are accepted as data service
providers on the web. In order to make such a data integration system fully
equipped with data manipulation capabilities, programming frameworks which
support update at the integration level are being developed. When the user is
permitted to submit updates, it is necessary to establish the best possible
data consistency for the whole data integration system. To that extend, we
present an approach based on an XQuery trigger service. We define an XQuery
trigger model together with its semantics. We report on the integration of the
XQuery trigger service into the overall architecture and discuss details of the
execution model. Experiments show that our approach provides an easy, efficient
and convenient way to achieve data consistency at the global level.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2009-09&engl=1}
}
@article {ART-2009-08,
author = {Nazario Cipriani and Daniela Nicklas and Matthias Gro{\ss}mann and Nicola H{\"o}nle and Carlos L{\"u}bbe and Bernhard Mitschang},
title = {{Verteilte Datenstromverarbeitung von Sensordaten}},
journal = {Datenbank-Spektrum},
publisher = {dpunkt Verlag},
volume = {9},
number = {28},
pages = {37--43},
type = {Article in Journal},
month = {February},
year = {2009},
language = {German},
cr-category = {H.2.4 Database Management Systems,
H.2.8 Database Applications,
E.4 Data Coding and Information Theory},
contact = {nazario.cipriani@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Technologischer Fortschritt im Bereich der Mikroelektronik und
Kommunikationstechnik f{\"u}hren zunehmend zu einem stark vernetzten, mit Sensoren
ausgestatteten Umfeld. Die Herausforderung, die in diesem Zusammenhang
auftretenden kontinuierlichen Datenstr{\"o}me zu verwalten und effizient zu
verarbeiten, sowie heterogene Sensorger{\"a}te und Netztopologien zu integrieren,
ist f{\"u}r viele Anwendungsentwickler eine zu gro{\ss}e H{\"u}rde. In dieser Arbeit wird
eine Middleware vorgestellt, die es einer Anwendung erm{\"o}glicht, anfragebasiert
die Verarbeitung von kontinuierlichen Datenstr{\"o}men zu steuern. Die zur
Verarbeitung der Daten ben{\"o}tigten Operatoren werden virtualisiert ausgef{\"u}hrt,
um die gr{\"o}{\ss}tm{\"o}gliche Flexibilit{\"a}t bei der Verteilung auf die beteiligten
physischen Knoten zu erreichen. Weiterhin werden Ans{\"a}tze zur Komprimierung von
Datenstr{\"o}men vorgestellt, um das Gesamtvolumen der ausgetauschten Daten
zwischen den Knoten zu reduzieren.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2009-08&engl=1}
}
@article {ART-2009-03,
author = {Daniela Nicklas and Matthias Grossmann and Matthias Wieland},
title = {{Context Modeling for Mobile, Adaptive Applications}},
journal = {it - Information Technology},
publisher = {Oldenbourg Wissenschaftsverlag GmbH},
volume = {51},
number = {2},
pages = {85--92},
type = {Article in Journal},
month = {March},
year = {2009},
doi = {10.1524/itit.2009.0527},
keywords = {ubiquitous Computing; pervasive Computing; context modeling; mobile applications; Content Analysis and indexing},
language = {English},
cr-category = {H.2.4 Database Management Systems,
H.2.8 Database Applications,
H.3 Information Storage and Retrieval},
ee = {http://www.atypon-link.com/OLD/toc/itit/51/2,
http://www.atypon-link.com/OLD/doi/pdf/10.1524/itit.2009.0527},
department = {University of Stuttgart, Institute of Architecture of Application Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Abstract
Context-aware applications adapt their behavior according to the current
situation. In this paper we present a layered model to separate different tasks
and concerns in designing data models for context-aware applications. The
layers are discussed along the Nexus approach, which is designed to support
large-scale, mobile applications by federated context models. Furthermore,
related approaches are described and compared.
Zusammenfassung
Kontextbezogene Anwendungen adaptieren ihr Verhalten aufgrund von Situationen.
Dieser Beitrag pr{\"a}sentiert ein Ebenenmodell f{\"u}r Datenmodelle solcher
Anwendungen, das verschiedene Probleme und Aufgaben im Entwurf der Daten, der
Adaptionslogik und der Verarbeitung kapselt. Die Ebenen werden anhand des
Nexus-Ansatzes diskutiert, dessen Ziel es ist, mobile, kontext-bezogene
Anwendungen durch f{\"o}derierte Kontextmodelle zu unterst{\"u}tzen, und mit verwandten
Arbeiten verglichen.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2009-03&engl=1}
}
@article {ART-2008-16,
author = {Stefanie Springer and Severin Beucker and Daniel Heubach and Fabian Kaiser and Dierk-Oliver Kiehne and Mih{\'a}ly Jakob},
title = {{Mit Softwaretools zu nachhaltigen Produkt- und Serviceinnovationen}},
journal = {{\"O}kologisches Wirtschaften},
address = {M{\"u}nchen},
publisher = {oekom verlag},
pages = {43--46},
type = {Article in Journal},
month = {August},
year = {2008},
issn = {1430-8800},
language = {German},
cr-category = {H.3.3 Information Search and Retrieval,
D.2.3 Software Engineering Coding Tools and Techniques},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Wie k{\"o}nnen Unternehmen durch Produktinnovationen einen Beitrag dazu leisten,
dass das Leitbild einer nachhaltigen Entwicklung umgesetzt wird, und welche
Potenziale bietet dabei der Einsatz des Internet? Das waren die zentralen
Fragen, die in dem von 2003 bis 2007 vom BMBF gef{\"o}rderten Projekt nova-net
gestellt wurden.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2008-16&engl=1}
}
@article {ART-2008-06,
author = {Daniel Heubach and Severin Beucker and Fabian Kaiser and Mih{\'a}ly Jakob and Dierk-Oliver Kiehne},
title = {{Fr{\"u}he Innovationsphasen - Informationsgewinnung durch delphigest{\"u}tztes Szenariomanagement und Expertensuche}},
journal = {ZWF Zeitschrift f{\"u}r wirtschaftlichen Fabrikbetrieb},
publisher = {Hanser-Verlag},
pages = {260--264},
type = {Article in Journal},
month = {April},
year = {2008},
issn = {0947-0085},
keywords = {fr{\"u}he Innovationsphasen; delphigest{\"u}tztes Szenario-Management; Expertensuche; semantische Modelle; Internet; Technologiemanagement},
language = {German},
cr-category = {H.3 Information Storage and Retrieval,
H.3.3 Information Search and Retrieval},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Fr{\"u}he Innovationsphasen stellen f{\"u}r Unternehmen eine besondere Herausforderung
dar, da in ihnen Orientierungswissen geschaffen und strukturiert werden muss,
das bei einer Entscheidungsfindung {\"u}ber den Einstieg in ein Technologie- oder
Marktfeld oder auch ein bestimmtes Produktsegment unterst{\"u}tzt. Daf{\"u}r k{\"o}nnen
verschiedene Informationsquellen wie z.B. das Internet und externe Experten
genutzt werden. Hier setzt das Forschungsprojekt nova-net an. Es stellt die
zwei Methoden des delphigest{\"u}tzten Szenario-Management und der Expertensuche
mit den dazugeh{\"o}rigen Softwaretools SEMAFOR und EXPOSE zur Verf{\"u}gung, die den
Arbeitsaufwand der Informationsbeschaffung und Strukturierung deutlich
reduzieren und die in ihrer Kombination eine gute Unterst{\"u}tzung bei der
Entscheidungsfindung in fr{\"u}hen Innovationsphasen liefern.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2008-06&engl=1}
}
@article {ART-2007-09,
author = {Christoph Mangold},
title = {{A survey and classification of semantic search approaches}},
journal = {International Journal of Metadata, Semantics and Ontologies},
publisher = {Inderscience},
volume = {2},
number = {1},
pages = {23--34},
type = {Article in Journal},
month = {December},
year = {2007},
doi = {10.1504/IJMSO.2007.015073},
language = {English},
cr-category = {H.3.3 Information Search and Retrieval,
I.2.4 Knowledge Representation Formalisms and Methods},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/ART-2007-09/ART-2007-09.pdf,
http://www.inderscience.com/browse/index.php?journalID=152&year=2007&vol=2&issue=1},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {A broad range of approaches to semantic document retrieval has been developed
in the context of the Semantic Web. This survey builds bridges among them. We
introduce a classification scheme for semantic search engines and clarify
terminology. We present an overview of ten selected approaches and compare them
by means of our classification criteria. Based on this comparison, we identify
not only common concepts and outstanding features, but also open issues.
Finally, we give directions for future application development and research.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2007-09&engl=1}
}
@article {ART-2007-08,
author = {Fabian Kaiser and Mih{\'a}ly Jakob and Sebastian Wiedersheim and Holger Schwarz},
title = {{Framework-Unterst{\"u}tzung f{\"u}r aufwendige Websuche}},
journal = {Datenbank-Spektrum},
publisher = {dpunkt-Verlag},
volume = {7},
number = {23},
pages = {13--20},
type = {Article in Journal},
month = {November},
year = {2007},
language = {German},
cr-category = {H.3.3 Information Search and Retrieval},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Die Nutzung des WWW als wichtige Informationsquelle ist heute fester
Bestandteil der t{\"a}glichen Arbeit. Komplexe Suchaufgaben resultieren hierbei
h{\"a}ufig in nicht trivialen, lang andauernden Suchprozessen, in deren Rahmen
gro{\ss}e Datenmengen verwaltet und analysiert werden m{\"u}ssen. Ein Beispiel hierf{\"u}r
ist die Suche nach Experten zu einem gegebenen Themenkomplex. Dieser Beitrag
stellt das Softwareframework Supernova vor, das derartige Suchprozesse
unterst{\"u}tzt. Die flexible und erweiterbare Suchplattform erlaubt es, einen
Focused Crawler mit Standardsuchmaschinen zu kombinieren, stellt diverse
Analysekomponenten sowie die Infrastruktur f{\"u}r deren Daten- und
Informationsaustausch bereit und bildet somit die Basis f{\"u}r eine effiziente
Websuche bei komplexen Fragestellungen.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2007-08&engl=1}
}
@article {ART-2006-20,
author = {Christian Becker and Frank D{\"u}rr and Mirko Knoll and Daniela Nicklas and Torben Weis},
title = {{Entwicklung ortsbezogener Anwendungen}},
journal = {Praxis der Informationsverarbeitung und Kommunikation (PIK)},
editor = {Hans G. Kruse},
publisher = {K. G. Saur Verlag},
volume = {29},
number = {1},
pages = {30--36},
type = {Article in Journal},
month = {January},
year = {2006},
doi = {10.1515/PIKO.2006.30},
keywords = {ortsbezogene Dienste; Anwendungsentwicklung},
language = {German},
cr-category = {D.2.2 Software Engineering Design Tools and Techniques},
contact = {Torben Weis torben.weis@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Ortsbezogene Anwendungen versorgen Benutzer mit Diensten und Informationen in
Abh{\"a}ngigkeit ihrer aktuellen Position. Beispiele sind im Bereich der
Telematikdienste, Touristenf{\"u}hrer und Informationsdienste zu finden.
Anwendungen k{\"o}nnen sich an den Ort oder ganz allgemein an den Kontext des
Benutzers anpassen, indem sie ortsspezifische Informationen anzeigen oder Daten
der jeweiligen Situation entsprechend pr{\"a}sentieren. Es existieren verschiedene
Ans{\"a}tze und Architekturen, um ortsbezogene Anwendungen zu realisieren. In
diesem Beitrag stellen wir unterschiedliche Ans{\"a}tze vor, diskutieren deren Vor-
und Nachteile und leiten daraus ein abstraktes Anwendungsmodell f{\"u}r
ortsbezogene Anwendungen ab. Des Weiteren stellen wir Entwicklungswerkzeuge
vor, die wir auf Basis dieses Anwendungsmodells konzipiert haben. Die
Kombination aus Anwendungsmodell und spezialisierten Werkzeugen wird die
Entwicklung ortsbezogener Anwendungen systematisieren und wesentlich
vereinfachen.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2006-20&engl=1}
}
@article {ART-2006-13,
author = {Kurt Rothermel and Thomas Ertl and Dieter Fritsch and Paul J. K{\"u}hn and Bernhard Mitschang and Engelbert Westk{\"a}mper and Christian Becker and Dominique Dudkowski and Andreas Gutscher and Christian Hauser and Lamine Jendoubi and Daniela Nicklas and Steffen Volz and Matthias Wieland},
title = {{SFB 627 -- Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme}},
journal = {Informatik - Forschung und Entwicklung},
publisher = {Springer-Verlag},
volume = {21},
number = {1-2},
pages = {105--113},
type = {Article in Journal},
month = {June},
year = {2006},
language = {German},
cr-category = {C.2.4 Distributed Systems,
H.2.4 Database Management Systems,
H.2.8 Database Applications,
H.3 Information Storage and Retrieval},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/ART-2006-13/ART-2006-13.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
Universit{\"a}t Stuttgart, Institut f{\"u}r Industrielle Fertigung und Fabrikbetrieb (IFF);
University of Stuttgart, Institute of Visualisation and Interactive Systems, Visualisation and Interactive Systems;
Universit{\"a}t Stuttgart, Institut f{\"u}r Photogrammetrie (ifp);
University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems;
Universit{\"a}t Stuttgart, Institut f{\"u}r Kommunikationsnetze und Rechnersysteme (IKR)},
abstract = {Computersysteme, wie wir sie heute kennen, passen sich typischerweise nicht an
den Benutzer und dessen Situation an. Erste Beispiele von Systemen, die durch
den Bezug zur Realwelt den Kontext des Benutzers einbeziehen, sind
Navigationssysteme, die unter Ber{\"u}cksichtigung der Position eines Benutzers und
der Verkehrslage Richtungsanweisungen geben k{\"o}nnen. Damit innovative
kontextbezogene Anwendungen m{\"o}glich werden, muss der Kontext, also der Zustand
der Realwelt, durch Sensoren erfasst, in das Computersystem {\"u}bermittelt und
dort in Form dynamischer Umgebungsmodelle den Anwendungen zur Verf{\"u}gung
gestellt werden.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2006-13&engl=1}
}
@article {ART-2006-10,
author = {Mih{\'a}ly Jakob and Fabian Kaiser and Holger Schwarz and Severin Beucker},
title = {{Generierung von Webanwendungen f{\"u}r das Innovationsmanagement}},
journal = {it - Information Technology},
publisher = {Oldenbourg},
volume = {48},
number = {4},
pages = {225--232},
type = {Article in Journal},
month = {August},
year = {2006},
language = {German},
cr-category = {K.6.3 Software Management},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Der folgende Beitrag gibt einen {\"U}berblick {\"u}ber das vom Bundesministerium f{\"u}r
Bildung und Forschung (BMBF) im Schwerpunktprogramm Internet{\"o}konomie gef{\"o}rderte
Forschungsprojekt nova-net: Innovation in der Internet{\"o}konomie. Neben dem
Forschungsrahmen und den Forschungsfeldern zur Unterst{\"u}tzung nachhaltiger
Innovationsprozesse, wird insbesondere auf die Methodenentwicklung und deren
informationstechnische Umsetzung im Themenfeld Trendmonitoring im
Szenariomanagement eingegangen. Im Mittelpunkt steht hierbei die Erl{\"a}uterung
des Szenario-Management-Frameworks SEMAFOR im Zusammenhang mit einer neu
entwickelten Methode zur Entwicklung von Szenarien, sowie deren Umsetzung
mittels des Webanwendungsgenerators WAGen.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2006-10&engl=1}
}
@article {ART-2005-01,
author = {Stefan Berger and Christoph Mangold and Sebastian Meyer},
title = {{Ontologiebasiertes Wissensmanagement in der Montage - Wissen in turbulenten Zeiten strukturiert einsetzen}},
journal = {Industrie Management},
address = {Berlin},
publisher = {Gito Verlag},
volume = {21},
number = {1},
pages = {49--52},
type = {Article in Journal},
month = {February},
year = {2005},
language = {German},
cr-category = {H.3.3 Information Search and Retrieval},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Produzierende Unternehmen sind in turbulenten Aufgabenumfeldern mit
permanenten, nicht vorhersehbaren Ver{\"a}nderungen konfrontiert, zu deren
Bew{\"a}ltigung ihnen nur wenig Zeit zur Verf{\"u}gung steht. Bereits heute pr{\"a}gen
Umr{\"u}st- und Einstellvorg{\"a}nge den Arbeitsalltag in der Montage. In diesem Umfeld
kommt dem Menschen als Probleml{\"o}ser und damit der Ressource Wissen und deren
Management eine immer bedeutendere Funktion zu. Der folgende Beitrag zeigt
Ansatzpunkte f{\"u}r ein Wissensmanagement-System f{\"u}r die Montage.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2005-01&engl=1}
}
@article {ART-2004-20,
author = {Bernhard Mitschang and Stefan Jablonski},
title = {{Middleware-Technologien zur Systemintegration}},
journal = {it - Information Technology},
editor = {Oldenbourg},
publisher = {Oldenbourg Wissenschaftsverlag},
volume = {46},
number = {4},
pages = {173--174},
type = {Article in Journal},
month = {April},
year = {2004},
issn = {1611-2776},
language = {German},
cr-category = {H.2.2 Database Management Physical Design},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Middleware-Technologien zur Systemintegration},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2004-20&engl=1}
}
@article {ART-2004-14,
author = {Bernhard Mitschang and Engelbert Westk{\"a}mper and Carmen Constantinescu and Uwe Heinkel and Benno L{\"o}ffler and Ralf Rantzau and Ralph Winkler},
title = {{Divide et Impera: A Flexible Integration of Layout Planning and Logistics Simulation through Data Change Propagation.}},
journal = {The CIRP - Journal of Manufacturing Systems},
editor = {J. Peklenik},
publisher = {CIRP},
volume = {33},
number = {6},
pages = {509--518},
type = {Article in Journal},
month = {November},
year = {2004},
language = {English},
cr-category = {J.6 Computer-Aided Engineering,
C.2.4 Distributed Systems},
contact = {Uwe.Heinkel@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The turbulent markets lead to new challenges for today's enterprises, they have
to be transformable to stay competitive. Therefore, we developed a new approach
that integrates Logistic Simulation and Layout Planning to fulfil the goal of
improving the production system. Our approach is based on propagation and
transformation of data changes concerning the continuous adaptation tasks among
the Layout Planning and Logistics Simulation systems. Instead of relying on a
tightly integrated global data schema, we connect systems only as far as
required by building ``bridges'' between them. The systems that participate in
the integration are kept autonomous. We use several state-of-the-art XML
technologies in our integration system.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2004-14&engl=1}
}
@article {ART-2004-07,
author = {Stefan Berger and Christoph Mangold and Sebastian Meyer},
title = {{Wissensmanagement f{\"u}r die wandlungsf{\"a}hige Montage}},
journal = {wt Werkstattstechnik},
publisher = {Springer VDI Verlag},
volume = {94},
pages = {80--85},
type = {Article in Journal},
month = {March},
year = {2004},
language = {German},
cr-category = {H.3.3 Information Search and Retrieval},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Turbulente Aufgabenumfelder verlangen von Unternehmen schnelle Antworten und
Adaptionen. Wandlungsf{\"a}higkeit und Responsef{\"a}higkeit als entscheidende
Erfolgsfaktoren basieren auf Wissen, das kontinuierlich zu verbessern und
aufgabenspezifisch zu erneuern ist. Gezeigt wird, wie sich mit Hilfe von
Wissensmanagement gezielt die Wandlungsf{\"a}higkeit in und von Unternehmen --
speziell in der Montage -- erh{\"o}hen l{\"a}sst.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2004-07&engl=1}
}
@article {ART-2004-06,
author = {Clemens Dorda and Hans-Peter Steiert and J{\"u}rgen Sellentin},
title = {{Modellbasierter Ansatz zur Anwendungsintegration}},
journal = {it - information technology},
publisher = {Oldenbourg},
volume = {46},
number = {4},
pages = {200--210},
type = {Article in Journal},
month = {August},
year = {2004},
keywords = {EAI; MDA; Model Driven Architecture; Enterprise Application Integration; UML; Unified Modeling Language},
language = {German},
cr-category = {D.1.2 Automatic Programming,
D.2.2 Software Engineering Design Tools and Techniques,
D.2.12 Software Engineering Interoperability,
D.2.13 Software Engineering Reusable Software,
H.2.5 Heterogeneous Databases,
I.6.5 Model Development},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/ART-2004-06/ART-2004-06.pdf,
http://www.it-inftech.de},
contact = {Clemens.Dorda@informatik.uni-stuttgart.de, Hans-Peter.Steiert@daimlerchrysler.com, Juergen.Sellentin@daimlerchrysler.com},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Moderne Produkte zur Anwendungsintegration in Unternehmen (EAI) bieten
Werkzeuge, um Integrationsszenarien zu modellieren. Allerdings lassen sich
damit heterogene IT-Umgebungen bisher immer nur ausschnittsweise darstellen, da
die Modelle unterschiedlicher EAI-Produkte nicht ausgetauscht oder integriert
werden k{\"o}nnen. Unser Ziel ist es, die Bildung solcher 'Integrationsinseln' zu
vermeiden. Dazu pr{\"a}sentieren wir einen Ansatz, der durch technologie- und
herstellerunabh{\"a}ngige Modellierung eine integrierte Sicht erlaubt. Unser
Vorgehensmodell schl{\"a}gt vor, diese integrierte Sicht werkzeuggest{\"u}tzt auf der
Basis von Repositories zu verfeinern, um die Realisierung mit konkreten
Produkten und das Deployment auf konkreten Plattformen zu automatisieren.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2004-06&engl=1}
}
@article {ART-2004-03,
author = {Daniela Nicklas and Bernhard Mitschang},
title = {{On building location aware applications using an open platform based on the NEXUS Augmented World Model}},
journal = {Software and Systems Modeling},
editor = {Bernhard Rumpe and Robert France},
address = {Berlin Heidelberg},
publisher = {Springer-Verlag},
volume = {3},
number = {4},
type = {Article in Journal},
month = {December},
year = {2004},
keywords = {Augmented World Model; Context-aware applications; Nexus; location-aware; infrastructure; open platform},
language = {English},
cr-category = {H.2.1 Database Management Logical Design,
H.2.8 Database Applications},
contact = {Daniela Nicklas danickla@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {How should the World Wide Web look like if it were for location-based
information? And how would mobile, spatially aware applications deal with such
a platform? In this paper we present the NEXUS Augmented World Model, an object
oriented data model which plays a major role in an open framework for both
providers of location-based information and new kinds of applications: the
NEXUS platform. We illustrate the usability of the model with several sample
applications and show the extensibility of this framework. At last we present a
stepwise approach for building spatially aware applications in this
environment.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2004-03&engl=1}
}
@article {ART-2004-01,
author = {Holger Schwarz},
title = {{Konzeptueller und logischer Data-Warehouse-Entwurf: Datenmodelle und Schematypen f{\"u}r Data Mining und OLAP}},
journal = {Informatik Forschung und Entwicklung},
publisher = {Springer},
volume = {18},
number = {2},
pages = {53--67},
type = {Article in Journal},
month = {January},
year = {2004},
language = {German},
cr-category = {H.2.1 Database Management Logical Design,
H.2.7 Database Administration,
H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Im Data-Warehouse-Bereich gibt es sowohl auf der konzeptuellen als auch auf der
logischen Ebene unterschiedliche Modellierungsans{\"a}tze, deren Entwicklung sich
in der Regel an typischen Fragestellungen aus dem Bereich des Online Analytical
Processing (OLAP) orientierte. Daneben spielen aber auch andere Ans{\"a}tze zur
Analyse der Daten in einem Data Warehouse eine bedeutende Rolle. Ein wichtiger
Vertreter ist Data Mining, mit dessen Hilfe bislang unbekannte Muster und
Zusammenh{\"a}nge in Daten identifiziert werden k{\"o}nnen. Im vorliegenden Artikel
wird untersucht, in wieweit sich die in der Literatur vorgeschlagenen
konzeptuellen Datenmodelle f{\"u}r ein Data Warehouse eignen, das OLAP- und
Data-Mining-Analysen gleicherma{\ss}en unterst{\"u}tzt. Hierzu wird das COCOM-Modell,
das auf den Modellierungsm{\"o}glichkeiten verschiedener publizierter Modelle
aufbaut, vorgestellt und bewertet. F{\"u}r das logische Schema eines Data Warehouse
wird h{\"a}ufig ein sogenanntes Star-Schema oder ein Snowflake-Schema erstellt. F{\"u}r
diese und weitere Schematypen wird analysiert, welchen Einflu{\ss} die Wahl des
logischen Schemas auf Anwendungen aus den Bereichen OLAP und Data Mining hat.
Wichtige Kriterien sind hier unter anderem der Informationsgehalt und die
Performanz. Insgesamt zeigt dieser Beitrag, dass das COCOM-Modell und das
Star-Schema eine gute Grundlage f{\"u}r den integrierten Einsatz von OLAP und
Data-Mining bilden.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2004-01&engl=1}
}
@article {ART-2003-17,
author = {Bernhard Mitschang},
title = {{Data propagation as an enabling technology for collaboration and cooperative information systems}},
journal = {Computers in Industry},
address = {Amsterdam},
publisher = {Elsevier Science Publishers B.V.},
volume = {52},
pages = {59--69},
type = {Article in Journal},
month = {September},
year = {2003},
language = {English},
cr-category = {H.5.3 Group and Organization Interfaces},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Collaboration in cooperative information systems, as occurring in concurrent
design and engineering, exploits common work and information spaces. In this
paper we present the Transaction-Oriented Group and Coordination Service for
Data-Centric Applications (TOGA) and the DataPropagator CHAMPAGNE that together
realize a shared information space that is controlled by a basic collaboration
service. Our approach enables both, firstly, the evolution of a set of separate
applications to form a cooperative information system, i.e. it provides a
technique towards component-oriented system engineering. Secondly, it can be
exploited as a basic service within collaboration frameworks to effectively
manage common work and information spaces.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2003-17&engl=1}
}
@article {ART-2003-03,
author = {Holger Schwarz and Tobias Kraft and Ralf Rantzau and Bernhard Mitschang},
title = {{Optimierung von Anfragesequenzen in Business-Intelligence-Anwendungen}},
journal = {it - Information Technology},
address = {M{\"u}nchen},
publisher = {Oldenbourg},
volume = {45},
number = {4},
pages = {196--202},
type = {Article in Journal},
month = {August},
year = {2003},
keywords = {Data Warehouse, Business Intelligence, Anfragesequenzen, OLAP, Data Mining},
language = {German},
cr-category = {H.2.4 Database Management Systems,
H.2.7 Database Administration,
H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Zur Analyse der Daten in einem Data Warehouse kommen unterschiedliche
Business-Intelligence-Anwendungen zum Einsatz. Ein wichtiger Erfolgsfaktor f{\"u}r
deren Nutzung ist die Effizienz, mit der die erstellten Anfragen ausgef{\"u}hrt
werden. In diesem Beitrag wird zun{\"a}chst das typische Verarbeitungsszenario f{\"u}r
generierte Anfragesequenzen im Bereich Business Intelligence erl{\"a}utert. Darauf
aufbauend wird eine Reihe anwendungsneutraler Optimierungsstrategien erl{\"a}utert
und bewertet. Anhand von Messergebnissen wird gezeigt, dass es sich
insbesondere bei der Restrukturierung von Anfragesequenzen um einen
vielversprechenden Ansatz handelt.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2003-03&engl=1}
}
@article {ART-2003-02,
author = {Ralf Rantzau and Leonard Shapiro and Bernhard Mitschang and Quan Wang},
title = {{Algorithms and Applications for Universal Quantification in Relational Databases}},
journal = {Information Systems},
editor = {Christian S. Jensen},
publisher = {Elsevier},
volume = {28},
number = {1-2},
pages = {3--32},
type = {Article in Journal},
month = {January},
year = {2003},
keywords = {query operators; relational division; grouping; set containment join; frequent itemset discovery},
language = {English},
cr-category = {H.2.4 Database Management Systems},
ee = {http://www.elsevier.nl/locate/is},
contact = {rrantzau@acm.org},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Queries containing universal quantification are used in many applications,
including business intelligence applications and in particular data mining. We
present a comprehensive survey of the structure and performance of algorithms
for universal quantification. We introduce a framework that results in a
complete classification of input data for universal quantification. Then we go
on to identify the most efficient algorithm for each such class. One of the
input data classes has not been covered so far. For this class, we propose
several new algorithms. Thus, for the first time, we are able to identify the
optimal algorithm to use for any given input dataset.
These two classifications of optimal algorithms and input data are important
for query optimization. They allow a query optimizer to make the best selection
when optimizing at intermediate steps for the quantification problem.
In addition to the classification, we show the relationship between relational
division and the set containment join and we illustrate the usefulness of
employing universal quantifications by presenting a novel approach for frequent
itemset discovery.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2003-02&engl=1}
}
@article {ART-2002-08,
author = {Bernhard Mitschang and Aiko Frank},
title = {{A customizable shared information space to support concurrent design}},
journal = {Special Issue of Computers in Industry},
address = {Amsterdam},
publisher = {Elsevier Science Publishers B. V.},
volume = {48},
number = {1},
pages = {45--58},
type = {Article in Journal},
month = {May},
year = {2002},
issn = {0166-3615},
language = {English},
cr-category = {H.2.2 Database Management Physical Design},
contact = {Bernhard.Mitschang@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Sharing data is an important aspect in distributed design environments and
should be supported by an underlying system. Any synchronous access to data is
conflict prone. Applying concurrency control and two phase commit are one
option to be considered. But design processes also demand for cooperation
between the designers. Negotiation about actions on the product under design
and the early exchange of preliminary results are crucial issues. Controlled
data access by itself does not fulfil all the needs for cooperation. We will
present a new approach that relies on a concept and system model which
integrates concurrent activities by a joint information space offering flexible
protocols for cooperation on the shared objects. We will describe the
customizability of the protocols to effectively support different cooperative
scenarios.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2002-08&engl=1}
}
@article {ART-2002-03,
author = {Steffen Volz and Matthias Gro{\ss}mann and Nicola H{\"o}nle and Daniela Nicklas and Thomas Schwarz},
title = {{Integration mehrfach repr{\"a}sentierter Stra{\ss}enverkehrsdaten f{\"u}r eine f{\"o}derierte Navigation}},
journal = {it+ti},
publisher = {Oldenbourg Verlag},
volume = {44},
number = {5},
pages = {260--267},
type = {Article in Journal},
month = {October},
year = {2002},
keywords = {Mobile Computing; Augmented World Model; GDF; ATKIS},
language = {German},
cr-category = {H.3.4 Information Storage and Retrieval Systems and Software,
H.2.1 Database Management Logical Design},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/ART-2002-03/ART-2002-03.pdf,
http://www.nexus.uni-stuttgart.de,
http://www.it-ti.de},
contact = {steffen.volz@ifp.uni-stuttgart.de, matthias.grossmann@informatik.uni-stuttgart.de, nicola.hoenle@informatik.uni-stuttgart.de, daniela.nicklas@informatik.uni-stuttgart.de, thomas.schwarz@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems;
Universit{\"a}t Stuttgart, Institut f{\"u}r Photogrammetrie (ifp)},
abstract = {Die Forschergruppe Nexus entwickelt eine offene, verteilte Plattform f{\"u}r
Anwendungen mit Ortsbezug. Dieser Artikel beschreibt, wie Strassenverkehrsdaten
aus unterschiedlichen Quellen in das gemeinsame Datenmodell der Plattform
integriert werden k{\"o}nnen, um Navigationsanwendungen zu erm{\"o}glichen. Die
Abbildung mehrfach repr{\"a}sentierter Daten in einem einheitlichen Schema ist
notwendig, um Anfragen auf Quellen zu verteilen, die Ergebnisse
zusammenzufassen und so bestehende Daten weiter nutzen zu k{\"o}nnen.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2002-03&engl=1}
}
@article {ART-2001-07,
author = {Daniela Nicklas and Matthias Gro{\ss}mann and Thomas Schwarz and Steffen Volz},
title = {{Architecture and Data Model of Nexus}},
journal = {GIS Geo-Informations-Systeme},
address = {Heidelberg},
publisher = {Herbert Wichmann Verlag},
volume = {9},
pages = {20--24},
type = {Article in Journal},
month = {September},
year = {2001},
keywords = {Nexus, architecture, data model, location-based, federation, standard class schema, extended class schema},
language = {English},
cr-category = {H.2.1 Database Management Logical Design,
H.3.4 Information Storage and Retrieval Systems and Software,
H.3.5 Online Information Services},
ee = {http://www.nexus.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems;
Universit{\"a}t Stuttgart, Institut f{\"u}r Photogrammetrie (ifp)},
abstract = {The NEXUS platform delivers location-based information and provides additional
supporting services enabling intelligent location-based applications. Its
architecture is open for both new applications and new information providers,
similar to the WWW. The Augmented World Model is a common data model for
location-based information. Our platform uses this model to federate such
information from different providers into an integrated view for the
applications. An example application shows the usage of this platform.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2001-07&engl=1}
}
@article {ART-2001-06,
author = {Matthias Gro{\ss}mann and Alexander Leonhardi and Bernhard Mitschang and Kurt Rothermel},
title = {{A World Model for Location-Aware Systems}},
journal = {Informatik},
publisher = {Schweizerischer Verband der Informatikorganisationen SVI/FSI},
volume = {8},
number = {5},
pages = {22--25},
type = {Article in Journal},
month = {October},
year = {2001},
keywords = {Mobile Computing; Location-Aware Applications; Augmented World Model},
language = {English},
cr-category = {H.2.3 Database Management Languages,
H.3.4 Information Storage and Retrieval Systems and Software},
ee = {http://www.nexus.uni-stuttgart.de},
contact = {matthias.grossmann@informatik.uni-stuttgart.de, alexander.leonhardi@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Advanced location-aware applications require a detailed model of the real
world. The goal of the Nexus platform is to provide such a model together with
generic functionality to a wide variety of location-aware applications. In this
paper, we describe the characteristics of this Augmented World Model and the
architecture of the Nexus platform. We look in more detail at the two main
components responsible for the main aspects of the world model, namely the
spatial data and the position information of mobile objects.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2001-06&engl=1}
}
@article {ART-2000-06,
author = {Aiko Frank and J{\"u}rgen Sellentin and Bernhard Mitschang},
title = {{TOGA - a customizable service for data-centric collaboration}},
journal = {Information Systems, Data Bases: Their Creation Management and Utilization, Special Issue: The 11th International Conference on Advanced Information Systems Engineering (CAiSE*99)},
editor = {Matthias Jarke and Dennis Shasha},
address = {Oxford},
publisher = {Elsevier Science Ltd.},
volume = {25},
number = {2},
pages = {157--176},
type = {Article in Journal},
month = {April},
year = {2000},
keywords = {Concurrent Engineering; Collaboration, CSCW, Events},
language = {English},
cr-category = {D.2.12 Software Engineering Interoperability,
H.2.4 Database Management Systems,
H.3.4 Information Storage and Retrieval Systems and Software,
H.4.1 Office Automation},
contact = {mitsch@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Collaboration in cooperative information systems, like concurrent design and
engineering, exploits common work and information spaces. In this paper we
introduce the TOGA service (Transaction-Oriented Group and Coordination Service
for Data-Centric Applications), which offers group management facilities and a
push model for change propagation w.r.t. shared data, thus allowing for group
awareness. Through TOGA's customizability and its layered architecture the
service can be adapted to a variety of different collaboration scenarios.
Multiple communication protocols (CORBA, UDP/IP, TCP/IP) are supported as well
as basic transaction properties. Our approach enables both, firstly, the
evolution of a set of separate applications to form a cooperative information
system, i.e., it provides a technique towards component-oriented system
engineering. Secondly, it can be exploited as a basic service within
collaboration frameworks to effectively manage common work and information
spaces. In this paper we report on design issues, implementation aspects, and
first experiences gained with the TOGA prototype and its exploitation in an
activity coordination and collaboration framework system.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2000-06&engl=1}
}
@article {ART-1999-03,
author = {J{\"u}rgen Sellentin and Bernhard Mitschang},
title = {{Data-Intensive Intra- and Internet Applications: Experiences using Java and Corba in the World Wide Web}},
journal = {Object-oriented technology in advanced applications},
editor = {E. Bertino and S. Urban},
address = {New York},
publisher = {John Wiley},
volume = {5},
number = {3},
pages = {181--197},
type = {Article in Journal},
month = {October},
year = {1999},
language = {English},
cr-category = {H.2.4 Database Management Systems,
H.3.5 Online Information Services},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {Data-Intensive Intra- and Internet Applications: Experiences using Java and
Corba in the World Wide Web.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-1999-03&engl=1}
}
@article {ART-1999-02,
author = {Reiner Siebert},
title = {{An Open Architecture for Adaptive Workflow Management Systems}},
journal = {Transactions of the SDPS: Journal of Integrated Design and Process Science, 1999},
editor = {Society for Design and Process Science},
address = {Austin, Texas},
publisher = {Society for Design and Process Science},
pages = {1--9},
type = {Article in Journal},
month = {January},
year = {1999},
keywords = {Workflow Management; Adaptive Workflow Systems; Enterprise Process Management; Cooperative Work},
language = {English},
cr-category = {H.4.1 Office Automation},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/ART-1999-02/ART-1999-02.pdf},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {One of the main tasks of workflow management systems is the coordination of
activities in business processes. In this paper, a solution framework is
introduced to support adaptive workflows with complex and unstructured task
dependencies. In chapter one the research direction is motivated and a short
overview of the PoliFlow project is given. Then, requirements for enterprise
process management systems are identified with a special focus on coordination
and control of unstructured processes. Taking these requirements as a basis, an
integrated approach for adaptive workflow support is presented. A flexible
workflow model including advanced control structures allows actors to modify
workflow instances during runtime. Finally, a reference architecture is
proposed. By example of the SWATS system, it is illustrated how an adaptive
support layer can be integrated to extend existing workflow management systems.
The paper concludes with an overview over the current state of the work and
future issues.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-1999-02&engl=1}
}
@article {ART-1999-01,
author = {Wolfgang Becker and Cora Burger and J{\"u}rgen Klarmann and Ottokar Kulendik and Frank Schiele and Kerstin Schneider},
title = {{Rechnerunterst{\"u}tzung f{\"u}r Interaktionen zwischen Menschen: Begriffskl{\"a}rung, Anwendungsgebiete und Basiswerkzeuge.}},
journal = {Informatik Spektrum},
publisher = {Springer-Verlag},
volume = {22},
number = {6},
pages = {422--435},
type = {Article in Journal},
month = {December},
year = {1999},
keywords = {CSCW, Groupware, Teleteaching, Workflow-Management, Elektronischer Handel},
language = {German},
cr-category = {H.4.3 Information Systems Applications Communications Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Distributed Systems},
abstract = {Die Rechnerunterst{\"u}tzung von Interaktionen zwischen Menschen wird in
unterschiedlichen Forschungsgebieten untersucht. Das f{\"u}hrt zu einer
vielf{\"a}ltigen, {\"u}berlappenden Begriffsbildung unter den Disziplinen. Die dadurch
bedingte Begriffsverwirrung behindert fach{\"u}bergreifende gemeinsame Entwicklung,
Standardisierung und Wieder- / Weiterverwendung von Ergebnissen. Im folgenden
werden daher breit verwendete Grundbegriffe wie CSCW, Kooperation, Konkurrenz,
Koordination, Kommunikation, Groupware, Workflow-Management, Teleteaching und
Elektronischer Markt geeignet geordnet. Schwerpunkte liegen dabei auf den
m{\"o}glichen Anwendungsgebieten sowie auf den durch Rechner unterst{\"u}tzten
technischen Grundinteraktionen. Damit soll der Beitrag Anwendern und
Entwicklern helfen, f{\"u}r konkrete Anwendungsprobleme Anforderungen ermitteln zu
k{\"o}nnen und Grundelemente in existierenden komplexen Systemen zu identifizieren.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-1999-01&engl=1}
}
@article {ART-1998-10,
author = {Stefan De{\ss}loch and Theo H{\"a}rder and Nelson Mattos and Bernhard Mitschang and Joachim Thomas},
title = {{Advanced Data Processing in KRISYS: Modeling Concepts, Implementation Techniques, and Client/Server Issues}},
journal = {VLDB Journal},
publisher = {Springer},
volume = {7},
number = {2},
pages = {79--95},
type = {Article in Journal},
month = {May},
year = {1998},
keywords = {Object-oriented modeling concepts; Consistency control; Query processing; Run-time optimization; Client/server architectures},
language = {English},
cr-category = {H Information Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems},
abstract = {The increasing power of modern computers is steadily opening up new application
domains for advanced data processing such as engineering and knowledge-based
applications. To meet their requirements, concepts for advanced data management
have been investigated during the last decade, especially in the field of
object orientation. Over the last couple of years, the database group at the
University of Kaiserslautern has been developing such an advanced database
system, the KRISYS prototype. In this article, we report on the results and
experiences obtained in the course of this project. The primary objective for
the first version of KRISYS was to provide semantic features, such as an
expressive data model, a set-oriented query language, deductive as well as
active capabilities. The first KRISYS prototype became completely operational
in 1989. To evaluate its features and to stabilize its functionality, we
started to develop several applications with the system. These experiences
marked the starting point for an overall redesign of KRISYS. Major goals were
to tune KRISYS and its query-processing facilities to a suitable client/server
environment, as well as to provide elaborate mechanisms for consistency control
comprising semantic integrity constraints, multi-user synchronization, and
failure recovery. The essential aspects of the resulting client/server
architecture are embodied by the client-side data management needed to
effectively support advanced applications and to gain the required system
performance for interactive work. The project stages of KRISYS properly reflect
the essential developments that have taken place in the research on advanced
database systems over the last years. Hence, the subsequent discussions will
bring up a number of important aspects with regard to advanced data processing
that are of significant general importance, as well as of general applicability
to database systems.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-1998-10&engl=1}
}
@inbook {INBOOK-2021-01,
author = {Dimitri Petrik and Mathias Mormul and Peter Reimann and Christoph Gr{\"o}ger},
title = {{Anforderungen f{\"u}r Zeitreihendatenbanken im industriellen IoT}},
series = {IoT -- Best Practices},
publisher = {Springer-Verlag},
pages = {339--377},
type = {Article in Book},
month = {May},
year = {2021},
keywords = {Zeitreihendaten; Zeitreihendatenbanken; Industrial IoT; Edge Computing; Data Lake; InfluxDB},
language = {German},
cr-category = {H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Das industrielle Internet der Dinge (IIoT) integriert Informations- und
Kommunikationstechnologien in industrielle Prozesse und erweitert sie durch
Echtzeit-Datenanalyse. Hierbei sind sensorbasierte Zeitreihen ein wesentlicher
Typ von Daten, die in der industriellen Fertigung generiert werden.
Sensorbasierte Zeitreihendaten werden in regelm{\"a}{\ss}igen Abst{\"a}nden generiert
und enthalten zus{\"a}tzlich zum Sensorwert einen Zeitstempel. Spezielle
Zeitreihen-Datenbanken (eng.: Time Series Databases (TSDB)) sind daf{\"u}r
ausgelegt, Zeitreihendaten effizient zu speichern. Wenn TSDBs maschinennah, d.
h. in der industriellen Edge, eingesetzt werden, sind Maschinendaten zur
{\"U}berwachung zeitkritischer Prozesse aufgrund der niedrigen Latenz schnell
verf{\"u}gbar, was die erforderliche Zeit f{\"u}r die Datenverarbeitung reduziert.
Andererseits k{\"o}nnen TSDBs auch in den Data Lakes als skalierbaren
Datenplattformen zur Speicherung und Analyse von Rohdaten zum Einsatz kommen,
um die langfristige Vorhaltung von Zeitreihendaten zu erm{\"o}glichen. Bisherige
Untersuchungen zu TSDBs sind bei der Auswahl f{\"u}r den Einsatz in der
industriellen Edge und im Data Lake nicht vorhanden. Die meisten verf{\"u}gbaren
Benchmarks von TSDBs sind performanceorientiert und ber{\"u}cksichtigen nicht die
Randbedingungen einer industriellen Edge oder eines Data Lake. Wir adressieren
diese L{\"u}cke und identifizieren funktionale Kriterien f{\"u}r den Einsatz von
TSDBs in diesen beiden Umgebungen und bilden somit einen qualitativen
Kriterienkatalog. Des Weiteren zeigen wir am Beispiel von InfluxDB, wie dieser
Katalog verwendet werden kann, mit dem Ziel die systematische Auswahl einer
passenden TSDB f{\"u}r den Einsatz in der Edge und im Data Lake zu unterst{\"u}tzen.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2021-01&engl=1}
}
@inbook {INBOOK-2019-03,
author = {Christoph Stach and Frank Steimle and Bernhard Mitschang},
title = {{How to Realize Device Interoperability and Information Security in mHealth Applications}},
series = {Biomedical Engineering Systems and Technologies},
address = {Cham},
publisher = {Springer Nature},
series = {Communications in Computer and Information Science},
volume = {1024},
pages = {213--237},
type = {Article in Book},
month = {August},
year = {2019},
isbn = {978-3-030-29195-2},
doi = {10.1007/978-3-030-29196-9_12},
keywords = {mHealth; Device interoperability; Information security; COPD},
language = {English},
cr-category = {H.5.0 Information Interfaces and Presentation General,
K.6.5 Security and Protection,
K.8 Personal Computing},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {More and more people suffer from chronic diseases such as the chronic
obstructive pulmonary disease (COPD). This leads to very high treatment costs
every year, as such patients require a periodic screening of their condition.
However, many of these checks can be performed at home by the patients
themselves. This enables physicians to focus on actual emergencies. Modern
smart devices such as Smartphones contribute to the success of these
telemedical approaches. So-called mHealth apps combine the usability and
versatility of Smartphones with the high accuracy and reliability of medical
devices for home use. However, patients often face the problem of how to
connect medical devices to their Smartphones (the device interoperability
problem). Moreover, many patients reject mHealth apps due to the lack of
control over their sensitive health data (the information security problem).
In our work, we discuss the usage of the Privacy Management Platform (PMP) to
solve these problems. So, we describe the structure of mHealth apps and present
a real-world COPD application. From this application we derive relevant
functions of an mHealth app, in which device interoperability or information
security is an issue. We extend the PMP in order to provide support for these
recurring functions. Finally, we evaluate the utility of these PMP extensions
based on the real-world mHealth app.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2019-03&engl=1}
}
@inbook {INBOOK-2019-02,
author = {Christoph Stach and Bernhard Mitschang},
title = {{Elicitation of Privacy Requirements for the Internet of Things Using ACCESSORS}},
series = {Information Systems Security and Privacy},
address = {Cham},
publisher = {Springer Nature},
series = {Communications in Computer and Information Science},
volume = {977},
pages = {40--65},
type = {Article in Book},
month = {July},
year = {2019},
isbn = {978-3-030-25108-6},
doi = {10.1007/978-3-030-25109-3_3},
keywords = {Permission model; Data-centric; Derivation transparent; Fine-grained; Context-sensitive; Internet of Things; PMP; PATRON},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
D.4.6 Operating Systems Security and Protection},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Novel smart devices are equipped with various sensors to capture context data.
The Internet of Things (IoT) connects these devices with each other in order to
bring together data from various domains. Due to the IoT, new application areas
come up continuously. For instance, the quality of life and living can be
significantly improved by installing connected and remote-controlled devices in
Smart Homes. Or the treatment of chronic diseases can be made more convenient
for both, patients and physicians, by using Smart Health technologies.
For this, however, a large amount of data has to be collected, shared, and
combined. This gathered data provides detailed insights into the user of the
devices. Therefore, privacy is a key issue for such IoT applications. As
current privacy systems for mobile devices focus on a single device only, they
cannot be applied to a distributed and highly interconnected environment as the
IoT. Therefore, we determine the special requirements towards a permission
model for the IoT. Based on this requirements specification, we introduce
ACCESSORS, a data-centric permission model for the IoT and describe how to
apply such a model to two promising privacy systems for the IoT, namely the
Privacy Management Platform (PMP) and PATRON.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2019-02&engl=1}
}
@inbook {INBOOK-2019-01,
author = {Christoph Stach},
title = {{Fine-Grained Privacy Control for Fitness and Health Applications Using the Privacy Management Platform}},
series = {Information Systems Security and Privacy},
address = {Cham},
publisher = {Springer Nature},
series = {Communications in Computer and Information Science},
volume = {977},
pages = {1--25},
type = {Article in Book},
month = {July},
year = {2019},
isbn = {978-3-030-25108-6},
doi = {10.1007/978-3-030-25109-3_1},
keywords = {Smartbands; Health and Fitness Applications; Privacy Concerns; Bluetooth; Internet; Privacy Policy Model; Privacy Management Platform},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
D.4.6 Operating Systems Security and Protection,
K.8 Personal Computing},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Due to the Internet of Things, novel types of sensors are integrated into
everyday objects. A domain that benefits most is the fitness and health domain.
With the advent of the so-called Smartbands - i.e., bracelets or watches with
built-in sensors such as heart rate sensors, location sensors, or even glucose
meters - novel fitness and health applications are made possible. That way a
quantified self can be created. Despite all the advantages that such
applications entail, new privacy concerns arise.
These applications collect and process sensitive health data. Users are
concerned by reports about privacy violations. These violations are enabled by
inherent security vulnerabilities and deficiencies in the privacy systems of
mobile platforms. As none of the existing privacy approaches is designed for
the novel challenges arising from Smartband applications, we discuss, how the
Privacy Policy Model (PPM), a fine-grained and modular expandable permission
model, can be applied to this application area. This model is implemented in
the Privacy Management Platform (PMP). Thus the outcomes of this work can be
leveraged directly. Evaluation results underline the benefits of our work for
Smartband applications.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2019-01&engl=1}
}
@inbook {INBOOK-2018-01,
author = {Jasmin Guth and Uwe Breitenb{\"u}cher and Michael Falkenthal and Paul Fremantle and Oliver Kopp and Frank Leymann and Lukas Reinfurt},
title = {{A Detailed Analysis of IoT Platform Architectures: Concepts, Similarities, and Differences}},
series = {Internet of Everything: Algorithms, Methodologies, Technologies and Perspectives},
publisher = {Springer},
pages = {81--101},
type = {Article in Book},
month = {January},
year = {2018},
doi = {10.1007/978-981-10-5861-5_4},
keywords = {Internet of Things; IoT; Platform; Reference Architecture; FIWARE; OpenMTC; SiteWhere; Webinos; AWS IoT; IBM Watson IoT Platform; Microsoft Azure IoT Hub},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
D.4.7 Operating Systems Organization and Design,
H.3.4 Information Storage and Retrieval Systems and Software},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {The IoT is gaining increasing attention. The overall aim is to interconnect the
physical with the digital world. Therefore, the physical world is measured by
sensors and translated into processible data, and data has to be translated
into commands to be executed by actuators. Due to the growing interest in IoT,
the number of platforms designed to support IoT has risen considerably. As a
result of different approaches, standards, and use cases, there is a wide
variety and heterogeneity of IoT platforms. This leads to difficulties in
comprehending, selecting, and using appropriate platforms. In this work, we
tackle these issues by conducting a detailed analysis of several
state-of-the-art IoT platforms in order to foster the understanding of the (i)
underlying concepts, (ii) similarities, and (iii) differences between them. We
show that the various components of the different platforms can be mapped to an
abstract reference architecture, and analyze the effectiveness of this mapping.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2018-01&engl=1}
}
@inbook {INBOOK-2017-04,
author = {Laura Kassner and Christoph Gr{\"o}ger and Jan K{\"o}nigsberger and Eva Hoos and Cornelia Kiefer and Christian Weber and Stefan Silcher and Bernhard Mitschang},
title = {{The Stuttgart IT Architecture for Manufacturing}},
series = {Enterprise Information Systems: 18th International Conference, ICEIS 2016, Rome, Italy, April 25--28, 2016, Revised Selected Papers},
publisher = {Springer International Publishing},
series = {Lecture Notes in Business Information Processing},
volume = {291},
pages = {53--80},
type = {Article in Book},
month = {June},
year = {2017},
isbn = {978-3-319-62386-3},
doi = {10.1007/978-3-319-62386-3_3},
language = {English},
cr-category = {H.4.0 Information Systems Applications General,
D.2.12 Software Engineering Interoperability,
J.2 Physical Sciences and Engineering},
ee = {https://link.springer.com/chapter/10.1007/978-3-319-62386-3_3},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The global conditions for manufacturing are rapidly changing towards shorter
product life cycles, more complexity and more turbulence. The manufacturing
industry must meet the demands of this shifting environment and the increased
global competition by ensuring high product quality, continuous improvement of
processes and increasingly flexible organization. Technological developments
towards smart manufacturing create big industrial data which needs to be
leveraged for competitive advantages. We present a novel IT architecture for
data-driven manufacturing, the Stuttgart IT Architecture for Manufacturing
(SITAM). It addresses the weaknesses of traditional manufacturing IT by
providing IT systems integration, holistic data analytics and mobile
information provisioning. The SITAM surpasses competing reference architectures
for smart manufacturing because it has a strong focus on analytics and mobile
integration of human workers into the smart production environment and because
it includes concrete recommendations for technologies to implement it, thus
filling a granularity gap between conceptual and case-based architectures. To
illustrate the benefits of the SITAM's prototypical implementation, we
present an application scenario for value-added services in the automotive
industry.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2017-04&engl=1}
}
@inbook {INBOOK-2017-03,
author = {Christoph Stach and Frank Steimle and Ana Cristina Franco da Silva},
title = {{TIROL: The Extensible Interconnectivity Layer for mHealth Applications}},
series = {Information and Software Technologies: 23rd International Conference, ICIST 2017, Druskininkai, Lithuania, October 12-14, 2017, Proceedings},
address = {Cham},
publisher = {Springer International Publishing},
series = {Communications in Computer and Information Science},
pages = {1--12},
type = {Article in Book},
month = {October},
year = {2017},
keywords = {mHealth; medical devices; harmonization; interconnectivity layer},
language = {English},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
J.3 Life and Medical Sciences},
contact = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {The prevalence of various chronic conditions is on the rise. Periodic
screenings and a persistent therapy are necessary in order to aid the patients.
Increasing medical costs and overburdened physicians are the consequences. A
telemedical self-management of the illness is considered as the answer to this
problem. For this purpose mHealth applications, i.e., the synergy of common
smartphones and medical metering devices, are vitally needed. However, poor
device interoperability due to heterogeneous connectivity methods hampers the
usage of such applications. For this very reason, we introduce the concept for
an exTensible InteRcOnnectivity Layer (TIROL) to deal with the
interconnectivity issues of mHealth applications. Furthermore, we present a
prototypical implementation for TIROL to demonstrate the benefits of our
approach.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2017-03&engl=1}
}
@inbook {INBOOK-2017-01,
author = {Pascal Hirmer and Michael Behringer},
title = {{FlexMash 2.0 -- Flexible Modeling and Execution of Data Mashups}},
series = {Rapid Mashup Development Tools - Second International Rapid Mashup Challenge, RMC 2016, Lugano, Switzerland, June 6, 2016, Revised Selected Papers},
publisher = {Springer International Publishing},
series = {Communications in Computer and Information Science},
volume = {696},
pages = {10--29},
type = {Article in Book},
month = {January},
year = {2017},
isbn = {978-3-319-53173-1 (Print), 978-3-319-53174-8 (Online)},
doi = {10.1007/978-3-319-53174-8},
issn = {1865-0929},
keywords = {ICWE Rapid Mashup Challenge 2016; FlexMash; Data processing and integration; Pipes and Filters},
language = {English},
cr-category = {H.2.8 Database Applications,
H.3.0 Information Storage and Retrieval General,
E.1 Data Structures},
ee = {http://link.springer.com/chapter/10.1007/978-3-319-53174-8_2},
contact = {pascal.hirmer@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {In recent years, the amount of data highly increases through cheap hardware,
fast network technology, and the increasing digitization within most domains.
The data produced is oftentimes heterogeneous, dynamic and originates from many
highly distributed data sources. Deriving information and, as a consequence,
knowledge from this data can lead to a higher effectiveness for problem solving
and thus higher profits for companies. However, this is a great challenge --
oftentimes referred to as Big Data problem. The data mashup tool FlexMash,
developed at the University of Stuttgart, tackles this challenge by offering a
means for integration and processing of heterogeneous, dynamic data sources. By
doing so, FlexMash focuses on (i) an easy means to model data integration and
processing scenarios by domain-experts based on the Pipes and Filters pattern,
(ii) a flexible execution based on the user's non-functional requirements,
and (iii) high extensibility to enable a generic approach. A first version of
this tool was presented during the ICWE Rapid Mashup Challenge 2015. In this
article, we present the new version FlexMash 2.0, which introduces new features
such as cloud-based execution and human interaction during runtime. These
concepts have been presented during the ICWE Rapid Mashup Challenge 2016.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2017-01&engl=1}
}
@inbook {INBOOK-2016-04,
author = {Uwe Breitenb{\"u}cher and Tobias Binz and Oliver Kopp and K{\'a}lm{\'a}n K{\'e}pes and Frank Leymann and Johannes Wettinger},
title = {{Hybrid TOSCA Provisioning Plans: Integrating Declarative and Imperative Cloud Application Provisioning Technologies}},
series = {Cloud Computing and Services Science},
publisher = {Springer International Publishing},
series = {Communications in Computer and Information Science},
volume = {581},
pages = {239--262},
type = {Article in Book},
month = {February},
year = {2016},
doi = {10.1007/978-3-319-29582-4_13},
isbn = {978-3-319-29581-7},
keywords = {Cloud application provisioning; TOSCA; Hybrid plans; Automation; Declarative modelling; Imperative modelling; Integration},
language = {English},
cr-category = {K.6 Management of Computing and Information Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {The efficient provisioning of complex applications is one of the most
challenging issues in Cloud Computing. Therefore, various provisioning and
configuration management technologies have been developed that can be
categorized as follows: imperative approaches enable a precise specification of
the low-level tasks to be executed whereas declarative approaches focus on
describing the desired goals and constraints. Since complex applications employ
a plethora of heterogeneous components that must be wired and configured,
typically multiple of these technologies have to be integrated to automate the
entire provisioning process. In a former work, we presented a workflow
modelling concept that enables the seamless integration of imperative and
declarative technologies. This paper is an extension of that work to integrate
the modelling concept with the Cloud standard TOSCA. In particular, we show how
Hybrid Provisioning Plans can be created that retrieve all required information
about the desired provisioning directly from the corresponding TOSCA model. We
validate the practical feasibility of the concept by extending the OpenTOSCA
runtime environment and the workflow language BPEL.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2016-04&engl=1}
}
@inbook {INBOOK-2016-01,
author = {Pascal Hirmer and Bernhard Mitschang},
title = {{FlexMash - Flexible Data Mashups Based on Pattern-Based Model Transformation}},
series = {Rapid Mashup Development Tools},
publisher = {Springer International Publishing},
series = {Communications in Computer and Information Science},
volume = {591},
pages = {12--30},
type = {Article in Book},
month = {February},
year = {2016},
isbn = {978-3-319-28726-3},
doi = {10.1007/978-3-319-28727-0_2},
keywords = {ICWE rapid mashup challenge 2015; Data mashups; Transformation patterns; TOSCA; Cloud computing},
language = {English},
cr-category = {H.2.8 Database Applications,
H.3.0 Information Storage and Retrieval General,
E.1 Data Structures},
ee = {http://dx.doi.org/10.1007/978-3-319-28727-0_2},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Today, the ad-hoc processing and integration of data is an important issue due
to fast growing IT systems and an increased connectivity of the corresponding
data sources. The overall goal is deriving high-level information based on a
huge amount of low-level data. However, an increasing amount of data leads to
high complexity and many technical challenges. Especially non-IT expert users
are overburdened with highly complex solutions such as Extract-Transform-Load
processes. To tackle these issues, we need a means to abstract from technical
details and provide a flexible execution of data processing and integration
scenarios. In this paper, we present an approach for modeling and pattern-based
execution of data mashups based on Mashup Plans, a domain-specific mashup model
that has been introduced in previous work. This non-executable model can be
mapped onto different executable ones depending on the use case scenario. The
concepts introduced in this paper were presented during the Rapid Mashup
Challenge at the International Conference on Web Engineering 2015. This paper
presents our approach, the scenario that was implemented for this challenge, as
well as the encountered issues during its preparation.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2016-01&engl=1}
}
@inbook {INBOOK-2015-06,
author = {Uwe Breitenb{\"u}cher and Tobias Binz and Oliver Kopp and Frank Leymann and Matthias Wieland},
title = {{Context-Aware Provisioning and Management of Cloud Applications}},
series = {Cloud Computing and Services Sciences},
publisher = {Springer International Publishing},
pages = {151--168},
type = {Article in Book},
month = {December},
year = {2015},
doi = {10.1007/978-3-319-25414-2_10},
isbn = {978-3-319-25413-5},
keywords = {Application Management; Provisioning; Context; Automation; Cloud Computing},
language = {English},
cr-category = {K.6 Management of Computing and Information Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Architecture of Application Systems},
abstract = {The automation of application provisioning and management is one of the most
important issues in Cloud Computing. However, the steadily increasing number of
different services and software components employed in composite Cloud
applications leads to a high risk of unintended side effects when different
technologies work together that bring their own proprietary management APIs.
Due to unknown dependencies and the increasing diversity and heterogeneity of
employed technologies, even small management tasks on a single component may
compromise the whole application functionality for reasons that are neither
expected nor obvious to non-experts. In this paper, we tackle these issues by
introducing a method that enables detecting and correcting unintended effects
of provisioning and management tasks in advance by analyzing the context in
which the tasks are executed. We validate the method practically and show how
context-aware expert management knowledge can be applied fully automatically to
provision and manage running Cloud applications.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2015-06&engl=1}
}
@inbook {INBOOK-2015-02,
author = {Eva Hoos and Christoph Gr{\"o}ger and Stefan Kramer and Bernhard Mitschang},
title = {{ValueApping: An Analysis Method to Identify Value-Adding Mobile Enterprise Apps in Business Processes}},
series = {Enterprise Information Systems},
publisher = {Springer International Publishing},
series = {Lecture Notes in Business Information Processing},
volume = {227},
type = {Article in Book},
month = {September},
year = {2015},
language = {English},
cr-category = {H.1.1 Systems and Information Theory},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Mobile enterprise apps provide novel possibilities for the optimization and
redesign of business processes, e.g., by the elimination of paper-based data
acquisitioning or ubiquitous access to up-to-date information. To leverage
these business potentials, a critical success factor is the identification and
evaluation of value-adding MEAs based on an analysis of the business process.
For this purpose, we present ValueApping, a systematic analysis method to
identify usage scenarios for value-adding mobile enterprise apps in business
processes and to analyze their business benefits. We describe the different
analysis steps and corresponding analysis artifacts of ValueApping and discuss
the results of a case-oriented evaluation in the automotive industry.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2015-02&engl=1}
}
@inbook {INBOOK-2013-02,
author = {Sylvia Radesch{\"u}tz and Holger Schwarz and Marko Vrhovnik and Bernhard Mitschang},
title = {{A Combination Framework for Exploiting the Symbiotic Aspects of Process and Operational Data in Business Process Optimization}},
series = {Information Reuse and Integration in Academia and Industry},
publisher = {Springer},
pages = {29--49},
type = {Article in Book},
month = {September},
year = {2013},
language = {English},
cr-category = {H.2 Database Management},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {A profound analysis of all relevant business data in a company is necessary for
optimizing business processes effectively. Current analyses typically run
either on business process execution data or on operational business data.
Correlations among the separate data sets have to be found manually under big
effort. However, to achieve a more informative analysis and to fully optimize a
company's business, an efficient consolidation of all major data sources is
indispensable. Recent matching algorithms are insufficient for this task since
they are restricted either to schema or to process matching. We present a new
matching framework to (semi-)automatically combine process data models and
operational data models for performing such a profound business analysis. We
describe the algorithms and basic matching rules underlying this approach as
well as an experimental study that shows the achieved high recall and
precision.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2013-02&engl=1}
}
@inbook {INBOOK-2013-01,
author = {Stefan Silcher and Max Dinkelmann and Jorge Minguez and Bernhard Mitschang},
title = {{Advanced Product Lifecycle Management by Introducing Domain-Specific Service Buses}},
series = {Enterprise Information Systems},
publisher = {Springer Berlin Heidelberg},
series = {Lecture Notes in Business Information Processing},
volume = {141},
pages = {92--107},
type = {Article in Book},
month = {October},
year = {2013},
doi = {10.1007/978-3-642-40654-6_6},
isbn = {978-3-642-40653-9 (Print), 978-3-642-40654-6 (Online)},
keywords = {Product lifecycle management; Service-oriented architecture; Enterprise service bus; Modular IT integration},
language = {English},
cr-category = {D.2.11 Software Engineering Software Architectures,
D.2.13 Software Engineering Reusable Software},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Manufacturing companies are operating today in a turbulent market. Permanently
changing preconditions force the companies to continuously adapt their
business and production processes to get the optimal productivity. Therefore, a
vast number of IT systems are introduced to support tasks along the product
life cycle. These systems are typically isolated and their communication,
cooperation and in special cases also integration results in more and more
overhead and gets quickly unmanageable. Further problems arise, when building
continuous processes within the Product Lifecycle Management (PLM). The
service-based PLM architecture faces these challenges and presents a
homogeneous integration approach based on Enterprise Service Bus (ESB)
technology. The characteristics and findings of our approach are presented and
the inclusion of security features is discussed. A proof-of-concept for the
production planning and the corresponding Production Planning Service Bus are
presented. Finally, the advantages of the service-based approach compared to
traditional integration solutions are pointed out.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2013-01&engl=1}
}
@inbook{INBOOK-2012-01,
  author      = {Stefan Silcher and Jorge Minguez and Bernhard Mitschang},
  title       = {{A Novel Approach to Product Lifecycle Management based on Service Hierarchies}},
  series      = {Recent Trends in Information Reuse and Integration},
  address     = {Vienna},
  publisher   = {Springer},
  pages       = {343--362},
  type        = {Article in Book},
  month       = {January},
  year        = {2012},
  isbn        = {978-3-7091-0738-6},
  keywords    = {Product Lifecycle Management; Service Oriented Architecture; Enterprise Service Bus},
  language    = {English},
  cr-category = {D.2.11 Software Engineering Software Architectures,
D.2.13 Software Engineering Reusable Software},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {In grown IT infrastructures for Product Lifecycle Management (PLM),
applications are mostly interconnected using point-to-point interfaces. This
leads to complex and unmanageable infrastructures. A continuous and efficient
integration is a key requirement for successful PLM implementations. The
Service Oriented Architecture (SOA) is a prevalent solution to efficiently
integrate legacy applications and systems into business processes. Its
possibility for loose coupling of services enables the replacement of
point-to-point interfaces, this way reducing the complexity of managing and
maintaining the IT infrastructure. This article introduces a SOA-based solution
to the integration of all PLM phases. We employ an Enterprise Service Bus (ESB)
as service-based integration and communication infrastructure and introduce
three exemplary scenarios to illustrate the benefits of using an ESB as
compared to alternative PLM infrastructures. Furthermore, we describe a service
hierarchy that extends PLM functionality with value-added services by mapping
business processes to data integration services.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2012-01&engl=1}
}
@inbook{INBOOK-2009-04,
  author      = {Rodrigo Salvador Monteiro and Geraldo Zimbr{\~a}o and Jano Moreira de Souza and Holger Schwarz and Bernhard Mitschang},
  title       = {{Exploring Calendar-based Pattern Mining in Data Streams}},
  series      = {Complex Data Warehousing and Knowledge Discovery for Advanced Retrieval Development: Innovative Methods and Applications},
  publisher   = {IGI Global},
  pages       = {1--30},
  type        = {Article in Book},
  month       = {June},
  year        = {2009},
  isbn        = {978-1-60566-748-5},
  language    = {English},
  cr-category = {H.2.8 Database Applications},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Finally, Chapter XVI introduces a calendar-based pattern mining that aims at
identifying patterns on specific calendar partitions in continuous data
streams. The authors present how a data warehouse approach can be applied to
leverage calendar-based pattern mining in data streams and how the framework of
the DWFIST approach can cope with tight time constraints imposed by data
streams, keep storage requirements at a manageable level and, at the same time,
support calendar-based frequent itemset mining. The minimum granularity of
analysis, parameters of the data warehouse (e.g. mining minimum support) and
parameters of the database (e.g. extent size) provide ways to tune the load
performance.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2009-04&engl=1}
}
@inbook {INBOOK-2009-01,
author = {Cataldo Mega and Kathleen Krebs and Frank Wagner and Norbert Ritter and Bernhard Mitschang},
title = {{Content-Management-Systeme der n{\"a}chsten Generation}},
series = {Wissens- und Informationsmanagement ; Strategien, Organisation und Prozesse},
address = {Wiesbaden},
publisher = {Gabler Verlag},
pages = {539--567},
type = {Article in Book},
month = {January},
year = {2009},
isbn = {978-3-8349-0937-4},
language = {German},
cr-category = {H.3.2 Information Storage},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2009-01&engl=1}
}
@inbook {INBOOK-2008-16,
author = {Anne-Sophie Br{\"u}ggen and Sarah Jessen and Laura Kassner and Thorsten Liebelt and Yvonne Schweizer and Annika Weschler},
title = {{Imagination}},
booktitle = {Kognition und Verhalten: Theory of Mind, Zeit, Imagination, Vergessen, Altruismus},
address = {M{\"u}nster},
publisher = {LIT-Verlag},
series = {Interdisziplin{\"a}re Forschungsarbeiten am FORUM SCIENTIARUM},
volume = {1},
pages = {85--128},
type = {Article in Book},
month = {January},
year = {2008},
isbn = {978-3-8258-1826-5},
keywords = {Imagination; Interdisziplin{\"a}re Forschung; K{\"u}nstliche Intelligenz},
language = {German},
cr-category = {A.m General Literature, Miscellaneous},
ee = {http://www.forum-scientiarum.uni-tuebingen.de/studium/studienkolleg/archiv/studienkolleg0607.html},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {'Die F{\"a}higkeit des Menschen zu denken ist Thema der Geisteswissenschaften,
aber auch der Psychologie, Anthropologie und zunehmend der Neurowissenschaften.
Dieser Sammelband, in dem die Abschlussarbeiten des ersten Jahrgangs des
Studienkollegs am Forum Scientiarum der Universit{\"a}t T{\"u}bingen dokumentiert
werden, besch{\"a}ftigt sich mit einigen ausgew{\"a}hlten Themen im Zusammenhang der
biologischen und kulturellen Grundlagen menschlichen Denkens.'
(Autorenreferat). Inhaltsverzeichnis: Judith Benz-Schwarzburg, Linda Braun,
Alexander Ecker, Tobias Kobitzsch, Christian L{\"u}cking: Theory of Mind bei
Mensch und Tier (I-50); Nina Baier, Christoph Paret, Sarah Wiethoff: Zeit und
Zeitbewusstsein (51-84); Anne-Sophie Br{\"u}ggen, Sarah Jessen, Laura Kassner,
Thorsten Liebelt, Yvonne Schweizer, Annika Weschler: Imagination (85-128);
Rainer Engelken, Kathleen Hildebrand, Nikolaus Schmitz, Silke Wagenh{\"a}user:
Vergessen als eine Grundlage menschlichen Denkens (129-176); Christian
G{\"a}ssler, Ralf J. Geretshauser, Bilal Hawa, Steffen Kudella, Sebastian Sehr,
Nora Umbach: Altruismus (177-211).},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2008-16&engl=1}
}
@inbook {INBOOK-2008-02,
author = {Clemens Dorda and Uwe Heinkel and Bernhard Mitschang},
title = {{A Concept for Applying Model-driven Engineering to Application Integration}},
series = {Challenges In Information Technology Management},
address = {Singapur},
publisher = {World Scientific},
pages = {168--174},
type = {Article in Book},
month = {May},
year = {2008},
isbn = {978-981-281-906-2 (ISBN-13), 981-281-906-1 (ISBN-10)},
keywords = {Enterprise Application Integration; Model-Driven Engineering; Software Lifecycle; EAI; MDA; MDE; UML; Unified Modeling Language},
language = {English},
cr-category = {D.2.2 Software Engineering Design Tools and Techniques,
D.2.13 Software Engineering Reusable Software,
I.6.5 Model Development},
contact = {Clemens.Dorda@ipvs.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Modern software for Enterprise Application Integration (EAI) provides tools for
modeling integration scenarios. A drawback of these tools is the lack of
functionality for exchanging or integrating models from different EAI products.
Consequently, developers are only partially able to describe real heterogeneous
IT environments. Our goal is to avoid the creation of these so-called
``integration islands''. For that purpose we present an approach which introduces
an abstract view by technology-independent and multivendor-capable modeling for
both development and maintenance. With this approach, we propose a toolset- and
repository-based refinement of the abstract view to automate implementation
with real products and deployment on real platforms.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2008-02&engl=1}
}
@inbook {INBOOK-2006-02,
author = {Rodrigo Salvador Monteiro and Geraldo Zimbr{\~a}o and Holger Schwarz and Bernhard Mitschang and Jano Moreira de Souza},
title = {{DWFIST: The Data Warehouse of Frequent Itemsets Tactics Approach}},
series = {Processing and Managing Complex Data for Decision Support},
publisher = {Idea Group Publishing},
pages = {1--30},
type = {Article in Book},
month = {April},
year = {2006},
isbn = {1-59140-655-2},
language = {English},
cr-category = {H.2.7 Database Administration,
H.2.8 Database Applications},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {This chapter presents the core of the DWFIST approach, which is concerned with
supporting the analysis and exploration of frequent itemsets and derived
patterns, e.g. association rules, in transactional datasets. The goal of this
new approach is to provide (1) flexible pattern-retrieval capabilities without
requiring the original data during the analysis phase, and (2) a standard
modeling for data warehouses of frequent itemsets allowing an easier
development and reuse of tools for analysis and exploration of itemset-based
patterns. Instead of storing the original datasets, our approach organizes
frequent itemsets holding on different partitions of the original transactions
in a data warehouse that retains sufficient information for future analysis. A
running example for mining calendar-based patterns on data streams is
presented. Staging area tasks are discussed and standard conceptual and logical
schemas are presented. Properties of this standard modeling allow to retrieve
frequent itemsets holding on any set of partitions along with upper and lower
bounds on their frequency counts. Furthermore, precision guarantees for some
interestingness measures of association rules are provided as well.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2006-02&engl=1}
}
@inbook {INBOOK-2005-01,
author = {Bernhard Mitschang and Daniela Nicklas and Matthias Grossmann and Thomas Schwarz and Nicola H{\"o}nle},
title = {{Federating Location-Based Data Services}},
booktitle = {Data Management in a Connected World: Essays Dedicated to Hartmut Wedekind on the Occasion of His 70th Birthday},
address = {Berlin},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
volume = {3551},
pages = {17--35},
type = {Article in Book},
month = {June},
year = {2005},
isbn = {3-540-26295-4},
keywords = {federation; context models; nexus; location-based services},
language = {English},
cr-category = {H.2.4 Database Management Systems,
H.3.3 Information Search and Retrieval,
H.3.5 Online Information Services},
ee = {http://www.nexus.uni-stuttgart.de,
http://dx.doi.org/10.1007/11499923_2},
contact = {mitsch@informatik.uni-stuttgart.de},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {With the emerging availability of small and portable devices which are able to
determine their position and to communicate wirelessly, mobile and
spatially-aware applications become feasible. These applications rely on
information that is bound to locations and managed by so-called location-based
data services. Based on a classification of location-based data services we
introduce a service-oriented architecture that is built on a federation
approach to efficiently support location-based applications.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2005-01&engl=1}
}
@inbook{INBOOK-2004-08,
  author      = {Peter Breitling and Holger Schwarz and Mirka Zimmermann},
  title       = {{Verwaltung der Lehr- und Lerninhalte in der Metadatenbank MITO}},
  series      = {Information Technology Online: Online-gest{\"u}tztes Lehren und Lernen in informationstechnischen Studieng{\"a}ngen},
  address     = {M{\"u}nster, New York, M{\"u}nchen, Berlin},
  publisher   = {Waxmann},
  pages       = {187--199},
  type        = {Article in Book},
  month       = {June},
  year        = {2004},
  isbn        = {3-8309-1358-3},
  language    = {German},
  cr-category = {H.0 Information Systems General},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Verwaltung der Lehr- und Lerninhalte in der Metadatenbank MITO},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2004-08&engl=1}
}
@inbook{INBOOK-2003-03,
  author      = {Carmen Constantinescu and Uwe Heinkel and Ralf Rantzau and Bernhard Mitschang},
  title       = {{A System For Data Change Propagation In Heterogeneous Information Systems}},
  series      = {Enterprise Information Systems IV},
  address     = {Dordrecht, Netherlands},
  publisher   = {Kluwer Academic Publishers},
  pages       = {51--59},
  type        = {Article in Book},
  month       = {January},
  year        = {2003},
  isbn        = {1-4020-1086-9},
  keywords    = {enterprise application integration; manufacturing; repository; propagation},
  language    = {English},
  cr-category = {H.3.4 Information Storage and Retrieval Systems and Software},
  contact     = {uwe.heinkel@informatik.uni-stuttgart.de},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Today, it is common that enterprises manage several mostly heterogeneous
information systems to supply their production and business processes with
data. There is a need to exchange data between the information systems while
preserving system autonomy. Hence, an integration approach that relies on a
single global enterprise data schema is ruled out. This is also due to the
widespread usage of legacy systems. We propose a system, called Propagation
Manager, which manages dependencies between data objects stored in different
information systems. A script specifying complex data transformations and other
sophisticated activities, like the execution of external programs, is
associated with each dependency. For example, an object update in a source
system can trigger data transformations of the given source data for each
destination system that depends on the object. Our system is implemented using
current XML technologies. We present the architecture and processing model of
our system and demonstrate the benefit of our approach by illustrating an
extensive example scenario.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2003-03&engl=1}
}
@proceedings {PROC-2018-01,
editor = {Nico Herzberg and Christoph Hochreiner and Oliver Kopp and J{\"o}rg Lenhard},
title = {{Proceedings of the 10th Central European Workshop on Services and their Composition (ZEUS 2018)}},
publisher = {CEUR-WS.org},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {CEUR Workshop Proceedings},
volume = {2072},
pages = {76},
type = {Proceedings},
month = {April},
year = {2018},
issn = {1613-0073},
language = {English},
cr-category = {H.4.1 Office Automation},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/PROC-2018-01/PROC-2018-01.pdf,
http://ceur-ws.org/Vol-2072/},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems},
abstract = {In February 2018, we had the pleasure to organize the 10th edition of the ZEUS
Workshop in Dresden, Germany. This workshop series offers young researchers an
opportunity to present and discuss early ideas and work in progress as well as
to establish contacts among young researchers. For this year's edition, we
selected eight regular submissions, two position papers, and one tool
demonstration by researchers from Belgium, Egypt, Germany, Italy, and
Switzerland for presentation at the workshop. Each submission went through a
thorough peer-review process and was assessed by at least three members of the
program committee with regard to its relevance and scientific quality. The
accepted contributions cover the areas of Microservices, Business Process
Management, and the Internet of Things. In addition, the workshop also hosted a
tool session to introduce early stage researchers to tools that ease the
literature research and the documentation of architectural decisions for
software systems.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=PROC-2018-01&engl=1}
}
@proceedings {PROC-2017-06,
editor = {Oliver Kopp and J{\"o}rg Lenhard and Cesare Pautasso},
title = {{Proceedings of the 9th Central European Workshop on Services and their Composition (ZEUS 2017)}},
publisher = {CEUR-WS.org},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {CEUR Workshop Proceedings},
volume = {1826},
pages = {92},
type = {Proceedings},
month = {May},
year = {2017},
language = {English},
cr-category = {H.4.1 Office Automation},
ee = {http://dblp.org/db/conf/zeus/zeus2017.html},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=PROC-2017-06&engl=1}
}
@proceedings {PROC-2017-05,
editor = {Bernhard Mitschang and Norbert Ritter and Holger Schwarz and Meike Klettke and Andreas Thor and Oliver Kopp and Matthias Wieland},
title = {{Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2017), 17. Fachtagung des GI-Fachbereichs ``Datenbanken und Informationssysteme'' (DBIS), Workshopband}},
publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
series = {LNI},
volume = {P266},
pages = {410},
type = {Proceedings},
month = {March},
year = {2017},
isbn = {978-3-88579-660-2},
language = {English},
cr-category = {H.4.1 Office Automation},
ee = {http://dblp.org/db/conf/btw/btw2017w.html},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems},
abstract = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2017) -- Workshopband},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=PROC-2017-05&engl=1}
}
@proceedings{PROC-2017-04,
  editor      = {Bernhard Mitschang and Daniela Nicklas and Frank Leymann and Harald Sch{\"o}ning and Melanie Herschel and Jens Teubner and Theo H{\"a}rder and Oliver Kopp and Matthias Wieland},
  title       = {{Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2017), 17. Fachtagung des GI-Fachbereichs ``Datenbanken und Informationssysteme'' (DBIS)}},
  publisher   = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
  institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
  series      = {LNI},
  volume      = {P265},
  pages       = {637},
  type        = {Proceedings},
  month       = {March},
  year        = {2017},
  isbn        = {978-3-88579-659-6},
  language    = {English},
  cr-category = {H.4.1 Office Automation},
  ee          = {http://dblp.org/db/conf/btw/btw2017.html},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems},
  abstract    = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2017)},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=PROC-2017-04&engl=1}
}
@proceedings {PROC-2011-01,
editor = {Theo H{\"a}rder and Wolfgang Lehner and Bernhard Mitschang and Harald Sch{\"o}ning and Holger Schwarz},
title = {{Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2011)}},
publisher = {GI},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {760},
type = {Proceedings},
month = {February},
year = {2011},
isbn = {978-3-88579-274-1},
language = {German},
cr-category = {H.2 Database Management},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems},
abstract = {The ``BTW'' is a biennial conference series focusing on a broad range of topics
addressing database management for Business, Technology, and Web. BTW 2011 as
its 14th event took place in Kaiserslautern from March 2nd to 4th. This volume
contains 24 long and 6 short papers selected for presentation at the
conference, 9 industrial contributions, 3 papers or abstracts for the invited
talks, 12 demonstration proposals, a panel description, and a paper written by
the winner of the dissertation award. The subject areas include core database
technology such as query optimization and indexing, DBMS-related prediction
models, data streams, processing of large data sets, Web-based information
extraction, benchmarking and simulation, and others.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=PROC-2011-01&engl=1}
}
@proceedings {PROC-2009-02,
editor = {Kurt Rothermel and Dieter Fritsch and Wolfgang Blochinger and Frank D{\"u}rr},
title = {{Quality of Context -- Proceedings of the First International Workshop on Quality of Context (QuaCon 2009)}},
address = {Stuttgart, Germany},
publisher = {Springer},
institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
series = {Lecture Notes in Computer Science},
volume = {5786},
pages = {184},
type = {Proceedings},
month = {June},
year = {2009},
isbn = {978-3-642-04558-5},
keywords = {context-aware systems; location-based services; quality; context},
language = {English},
cr-category = {H.2.8 Database Applications,
H.3.3 Information Search and Retrieval,
H.3.5 Online Information Services},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
Universit{\"a}t Stuttgart, Institut f{\"u}r Photogrammetrie (ifp);
University of Stuttgart, Institute of Parallel and Distributed Systems, Distributed Systems;
University of Stuttgart, Institute for Natural Language Processing},
abstract = {Advances in sensor technology, wireless communication, and mobile devices lead
to the proliferation of sensors in our physical environment. At the same time
detailed digital models of buildings, towns, or even the globe become widely
available. Integrating the huge amount of sensor data into spatial models
results in highly dynamic models of the real world, often called context
models.
A wide range of applications can substantially benefit from context models.
However, context data are inherently associated with uncertainty. In general,
quality of context information has to be taken into account by both context
management and applications. For example, the accuracy, completeness, and
trustworthiness of spatial context information such as street or building data
are very important for navigation and guidance systems.
QuaCon 2009 was the first international scientific meeting that specifically
focused on the different aspects of quality of context data. Research in
context management and, in particular, context quality, requires an
interdisciplinary approach. Therefore, the QuaCon workshop aimed to bring
together researchers from various fields to discuss approaches to context
quality and to make a consolidated contribution toward an integrated way of
treating this topic. We received 19 high-quality paper submissions by
researchers from Europe, USA, and Asia. The International Program Committee
selected 11 papers for presentation at the workshop. Additionally, five invited
contributions by internationally renowned experts in the field were included in
the workshop program. The presentations at the workshop showed many facets of
quality of context from different research fields including context data
management, spatial models, context reasoning, privacy, and system frameworks.
The lively discussions underlined the great interest in this topic and in
particular led to a deeper understanding of the relations between the various
aspects of quality of context.
The success of QuaCon 2009 was the result of a team effort. We are grateful to
the members of the Program Committee and the external reviewers for their
thorough and timely reviews as well as to the authors for their high-quality
submissions and interesting talks. We would like to extend special thanks to
our invited speakers for their excellent and inspiring keynotes. Finally, we
wish to thank all persons involved in the organization of the QuaCon 2009
workshop who really did a great job.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=PROC-2009-02&engl=1}
}
@proceedings {PROC-2007-01,
editor = {Christian Becker and Christian S. Jensen and Daniela Nicklas and Jianwen Su},
title = {{Proceedings of the 8th International Conference on Mobile Data Management (MDM'07) : Mannheim, Germany, May 7-11, 2007}},
publisher = {IEEE Computer Society},
institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
pages = {232},
type = {Proceedings},
month = {May},
year = {2007},
isbn = {1-4244-1240-4},
keywords = {mobile data management},
language = {English},
cr-category = {E.2 Data Storage Representations,
H.2 Database Management},
ee = {http://mdm2007.uni-mannheim.de/},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Message from the General Chairs The last two decades have seen a radical change
in the way computers and computing are embedded in society. Historically, the
Personal Computer and the Internet were the milestones that changed both the
computer industry and the way society perceived computers. Today, mobility and
the pervasive use of computing devices have resulted in a fundamental change in
paradigm. Data is no longer static, residing on a disk and on a personal
computer. Now data is something available everywhere, through a multitude of
commodity devices and constantly broadcast through a variety of channels. The
impact on industry and on the way we work with data has become obvious. For
instance, portable digital music players are forcing the music industry to
change its business model, soon the same will happen with video and television.
Similarly, some major telecommunication companies have recently embarked in an
effort to replace the old phone network with an IP based solution for all forms
of communication, thereby blurring even further the boundaries between
communication and computation. The International Conference on Mobile Data
Management lies at the heart of all these changes. In the same way that the
Personal Computer and the first spreadsheet revolutionized the industry, the
new world of pervasive and ubiquitous access to information requires new
approaches to the way we deal with data. And these new approaches need
appropriate venues to be presented and discussed. Taking place in Mannheim,
Germany, MDM 2007 aims at serving both as a top research conference as well
as a catalyst for new ideas and increased interactions between research and
industry. With an excellent technical program encompassing research papers and
demonstrators, and the complement of several seminars and workshops dedicated,
MDM 2007 has become the leading research venue for innovative research in all
aspects of data management related to mobile, wearable, and pervasive computing.
The success of MDM 2007 is nevertheless not due solely to the importance of the
area. Organizing a conference requires the concerted effort of many individuals
during a prolonged period of time, often extending well beyond the conference
itself. As the Conference Chairs, we would like to express our thanks and
gratitude to all those who have helped to make MDM 2007 possible. First and
foremost to the PC Chairs: Christian Becker, Christian Jensen, and Jianwen Su,
who have put together an excellent technical program. The program has been
complemented by an exciting demo track thanks to the efforts of Dieter Pfoser.
Panels, seminars, and workshops round up the program and have helped to make
the conference more attractive. They have been coordinated by Agnes Voisard,
Ralf Hartmut G{\"u}ting, and Birgitta K{\"o}nig-Ries, respectively. Special thanks go
to Daniela Nicklas for her efforts with the proceedings and to Torben Weis for
taking care of financial and budget matters. Klemens B{\"o}hm and Arno Jacobsen
were in charge of publicity and advertising the conference. Last but not least,
we want to thank the local organizers, who have done a great job in taking care
of all the menial but often cumbersome aspects of running a conference:
Christian Becker, Wolfgang Effelsberg, Armin Heinzl, and Martin Schader.
Looking forward to seeing you in Mannheim!
Gustavo Alonso and Pedro Jose Marron General Chairs},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=PROC-2007-01&engl=1}
}
@book{BOOK-2023-02,
  editor      = {Christoph Stach and Cl{\'e}mentine Gritti},
  title       = {{Security and Privacy in Blockchains and the IoT II}},
  address     = {Basel, Beijing, Wuhan, Barcelona, Belgrade, Novi Sad, Cluj, Manchester},
  publisher   = {MDPI},
  series      = {Future Internet},
  pages       = {480},
  type        = {Book},
  month       = {September},
  year        = {2023},
  isbn        = {978-3-0365-8772-1},
  doi         = {10.3390/books978-3-0365-8773-8},
  keywords    = {authentication; blockchain; demand-driven data provision; digital signatures; distributed ledger technology; encryption; Internet of Things; privacy-aware data processing; secure data management; smart things},
  language    = {English},
  cr-category = {K.6.5 Security and Protection,
K.4.1 Computers and Society Public Policy Issues},
  ee          = {https://www.mdpi.com/books/book/7885},
  contact     = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de.},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems},
  abstract    = {In this age of data-driven transformation, where the fusion of blockchain
technologies and the Internet of Things (IoT) is shaping the fabric of our
digital society, the need for security and privacy has never been more
important. This Special Issue delves into the intricate confluence of these two
disruptive forces and provides a comprehensive overview of security and privacy
aspects in this regard. Focusing on protection goals such as confidentiality,
integrity, availability, and privacy, this compilation encapsulates the essence
of these multi-layered challenges. Ranging from complex data-driven
applications and smart services to novel approaches that enhance security and
privacy in the context of blockchain technologies and the IoT, the research
articles and literature reviews presented here offer a sophisticated mesh of
insights. Innovative solutions are highlighted from a variety of perspectives,
and challenges such as secure data transmission, confidential communication,
and tamper-proof data storage are explored.
In this way, this Special Issue is a beacon for practitioners, researchers, and
technology enthusiasts. Developers seeking to harness the potential of
blockchain technology and IoT find rich insights while users get a
comprehensive overview of the latest research and trends. The symphony of
interdisciplinary knowledge presented here creates a harmonious blend of theory
and practice, highlighting the intricate interdependencies between
technological advances and the need for security and privacy.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=BOOK-2023-02&engl=1}
}
@book{BOOK-2023-01,
  editor      = {Christoph Stach},
  title       = {{Security and Privacy in Blockchains and the IoT}},
  address     = {Basel, Beijing, Wuhan, Barcelona, Belgrade, Manchester, Tokyo, Cluj, Tianjin},
  publisher   = {MDPI},
  series      = {Future Internet},
  pages       = {166},
  type        = {Book},
  month       = {January},
  year        = {2023},
  isbn        = {978-3-0365-6251-3},
  doi         = {10.3390/books978-3-0365-6252-0},
  keywords    = {Blockchain; IoT; Confidentiality; Integrity; Authenticity; Access Control; Security; Privacy; Efficient Blockchain Technologies; Trustworthy Smart Services; Privacy-Aware Machine Learning; Data Protection Laws},
  language    = {English},
  cr-category = {K.6.5 Security and Protection,
K.4.1 Computers and Society Public Policy Issues},
  ee          = {https://www.mdpi.com/books/book/6686},
  contact     = {Senden Sie eine E-Mail an Christoph.Stach@ipvs.uni-stuttgart.de.},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems},
  abstract    = {In this day and age, data have become an immensely valuable resource. They are
the key driver that puts the smart into smart services. This is fundamentally
fueled by two technological achievements, namely the Internet of Things (IoT),
which enables continuous and comprehensive collection of all kinds of data, and
blockchain technologies, which provide secure data management and exchange. In
addition to those information security measures, however, data privacy
solutions are also required to protect the involved sensitive data. In this
book, eight research papers address security and privacy challenges when
dealing with blockchain technologies and the IoT. Concerning the IoT, solutions
are presented on how IoT group communication can be secured and how trust
within IoT applications can be increased. In the context of blockchain
technologies, approaches are introduced on how query processing capabilities
can be enhanced and how a proof-of-work consensus protocol can be efficiently
applied in IoT environments. Furthermore, it is discussed how blockchain
technologies can be used in IoT environments to control access to confidential
IoT data as well as to enable privacy-aware data sharing. Finally, two reviews
give an overview of the state of the art in in-app activity recognition based
on convolutional neural networks and the prospects for blockchain technology
applications in ambient assisted living.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=BOOK-2023-01&engl=1}
}
@book{BOOK-2010-01,
  author      = {Holger Schwarz},
  title       = {{Anfragegenerierende Systeme: Anwendungsanalyse, Implementierungs- und Optimierungskonzepte}},
  address     = {Wiesbaden},
  publisher   = {Vieweg+Teubner},
  pages       = {201},
  type        = {Book},
  month       = {July},
  year        = {2010},
  isbn        = {978-3-8348-1298-8},
  language    = {German},
  cr-category = {H.4 Information Systems Applications},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institute of Parallel and Distributed Systems},
  abstract    = {Flexible Datenzugriffe sind in aktuellen Informationssystemen zur Erf{\"u}llung der
gestiegenen Nutzeranforderungen unabdingbar. In verschiedensten
Anwendungsgebieten werden daher generative Ans{\"a}tze genutzt, um spezifische
Anweisungen f{\"u}r Datenzugriffe bereitzustellen.
Holger Schwarz diskutiert Ans{\"a}tze der Anfragegenerierung und beleuchtet
Implementierungs- und Optimierungskonzepte. Die Generierungsans{\"a}tze erl{\"a}utert
er unter anderem an konkreten Anwendungsszenarien aus Bereichen wie Business
Intelligence, Workflow Management und Suchtechnologien. Das betrachtete
Spektrum erstreckt sich von den einzelnen Anwendungen bis hin zu Werkzeugen,
die bei der Anwendungsentwicklung zum Einsatz kommen. Zweiter Themenschwerpunkt
sind aktuelle Ans{\"a}tze zur Optimierung komplexer Strukturen generierter
Datenzugriffe. Diese werden vorgestellt und ihr Optimierungspotenzial an
Beispielszenarien aufgezeigt.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=BOOK-2010-01&engl=1}
}
@book{BOOK-2007-02,
  editor      = {Fabian Kaiser and Sven Schimpf and Holger Schwarz and Mih{\'a}ly Jakob and Severin Beucker},
  title       = {{Internetgest{\"u}tzte Expertenidentifikation zur Unterst{\"u}tzung der fr{\"u}hen Innovationsphasen}},
  publisher   = {Fraunhofer IRB Verlag},
  pages       = {30},
  type        = {Book},
  month       = {September},
  year        = {2007},
  isbn        = {978-3-8167-7448-8},
  language    = {German},
  cr-category = {H.3.3 Information Search and Retrieval},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme},
  abstract    = {In dieser Brosch{\"u}re wird eine strukturierte Vorgehensweise beschrieben, mit der
f{\"u}r einen Innovationsprozess relevante Themenfelder im und mit Hilfe des
Internet identifiziert und spezifiziert, sowie, darauf aufbauend, Experten in
diesen Themenfeldern gefunden werden k{\"o}nnen. Im zweiten Teil wird die
informationstechnische Umsetzung dieser Vorgehensweise der internetgest{\"u}tzten
Identifikation von Experten dargestellt. Anhand eines Anwendungsfalls wird
gezeigt, wie Unternehmen durch diese Vorgehensweise im Bezug auf die Planung
von Technologiefeldern in den fr{\"u}hen Innovationsphasen unterst{\"u}tzt werden
k{\"o}nnen.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=BOOK-2007-02&engl=1}
}
@book{BOOK-2007-01,
  editor      = {Mih{\'a}ly Jakob and Dierk-Oliver Kiehne and Holger Schwarz and Fabian Kaiser and Severin Beucker},
  title       = {{Delphigest{\"u}tztes Szenario-Management und -Monitoring}},
  publisher   = {Fraunhofer IRB Verlag},
  pages       = {28},
  type        = {Book},
  month       = {September},
  year        = {2007},
  isbn        = {978-3-8167-7449-5},
  language    = {German},
  cr-category = {H.4 Information Systems Applications},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme},
  abstract    = {Die vorliegende Brosch{\"u}re ist Ergebnis des Forschungsprojektes nova-net:
Innovation in der Internet{\"o}konomie. Das Projekt nova-net entwickelt Theorien,
Strategien und Instrumente zur Unterst{\"u}tzung nachhaltiger Produkt- und
Serviceinnovationen in der Internet{\"o}konomie. Dabei wird davon ausgegangen, dass
das Internet sowohl eine Quelle als auch ein Instrument f{\"u}r nachhaltige
unternehmerische Innovationsprozesse darstellt. In der vorliegenden Brosch{\"u}re
wird der Ansatz des delphigest{\"u}tzten Szenario-Managements und seine
softwaretechnische Unterst{\"u}tzung durch SEMAFOR vorgestellt.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=BOOK-2007-01&engl=1}
}
@book{BOOK-2006-02,
  author      = {Mih{\'a}ly Jakob and Holger Schwarz and Fabian Kaiser},
  title       = {{Technologie-Roadmap}},
  publisher   = {Fraunhofer IRB Verlag},
  pages       = {92},
  type        = {Book},
  month       = {April},
  year        = {2006},
  isbn        = {3-8167-7047-9},
  language    = {German},
  cr-category = {H.2.4 Database Management Systems,
H.2.7 Database Administration,
H.2.8 Database Applications},
  department  = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
  abstract    = {Im Rahmen der vorliegenden Technologie-Roadmap werden Basis-, Integrations- und
Anwendungstechnologien aufgezeigt, die die nachhaltige Gestaltung von
Innovationsprozessen unterst{\"u}tzen k{\"o}nnen. Die einleitenden Kapitel stellen
sowohl den Innovationsprozess und grunds{\"a}tzliche Themenfelder, die im
Forschungsprojekt nova-net bearbeitet werden, dar als auch das so genannte
Information Workflow, das den Informationsverarbeitungsprozess in sinnvolle
Phasen unterteilt und die systematische L{\"o}sung von Innovationsproblemen
erm{\"o}glicht. Weiterf{\"u}hrende Teile der Arbeit ordnen f{\"u}r das
Innovationsmanagement relevante Technologien den einzelnen
Informationsverarbeitungsphasen zu, und widmen sich speziellen Problemen, die
in den Themenfeldern des Forschungsprojekts nova-net besondere Beachtung
verdienen.},
  url         = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=BOOK-2006-02&engl=1}
}
@book {BOOK-2006-01,
author = {Fabian Kaiser and Holger Schwarz and Mih{\'a}ly Jakob},
title = {{Internetbasierte Expertensuche}},
publisher = {Fraunhofer IRB Verlag},
pages = {29},
type = {Book},
month = {April},
year = {2006},
isbn = {3-8167-7042-8},
language = {German},
cr-category = {H.3.3 Information Search and Retrieval},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Dieses Arbeitspapier diskutiert Methoden und Techniken zur Suche von Personen
mit Expertise auf frei definierbaren Themenfeldern. Der Suchraum konzentriert
sich dabei auf das Internet und insbesondere die Dienste WWW und E-Mail. Es
wird aufgezeigt, weshalb mittels herk{\"o}mmlicher Suchmaschinen im Allgemeinen
keine befriedigenden Ergebnisse erzielt werden k{\"o}nnen. Auf diesen
Schwachpunkten aufbauend werden verschiedene Techniken aufgezeigt, deren
Integration eine Expertensuche unterst{\"u}tzt. Im Vordergrund stehen dabei
Textklassifizierungssysteme, Crawler und die Integration des Wissens von
Suchmaschinen {\"u}ber Struktur und Inhalte des World Wide Web.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=BOOK-2006-01&engl=1}
}
@book {BOOK-2004-01,
author = {Marco Litto and Izabela Korajda and Christoph Mangold and Ronald Angerbauer and Winfried Hils and Michael Lerche},
title = {{Baukastenbasiertes Engineering mit F{\"o}deral - Ein Leitfaden f{\"u}r Maschinen- und Anlagenbauer}},
address = {Frankfurt a.M.},
publisher = {VDMA Verlag},
type = {Book},
month = {March},
year = {2004},
isbn = {3-8163-0478-8},
language = {German},
cr-category = {H.m Information Systems Miscellaneous},
department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
abstract = {Maschinen- und Anlagenhersteller haben das Ziel, die Standardisierung und den
Wiederholgrad im Engineering und der Produktion zu erh{\"o}hen. Dazu ist es
erforderlich, disziplin{\"u}bergreifend Baukastensysteme abzustimmen und zu
entwickeln. M{\"o}glich wird dies durch ein Projektierungswerkzeug, das
{\"u}bergeordnete Aufgaben des baukastenbasierten Engineerings zentral und
durchg{\"a}ngig unterst{\"u}tzt, es gleichzeitig aber auch erm{\"o}glicht, die Spezifika
und St{\"a}rken vorhandener Werkzeuge weiterhin zu nutzen.
Das F{\"o}deral-Projekt machte sich zum Ziel, den Maschinen- und Anlagenbauern ein
solches Projektierungswerkzeug zur Verf{\"u}gung zu stellen.
Das vorliegende Dokument soll die Ergebnisse des F{\"o}deral-Projekts weiteren
Firmen als Leitfaden f{\"u}r den Aufbau eigener Baukastensystematiken und
Baukastensysteme dienen. Dazu enth{\"a}lt er folgende Themenschwerpunkte:
- Darstellung der Grundlagen (F{\"o}deral-Methode, F{\"o}derale
Informations-Architektur etc.)
- Anwendung von Methode und Werkzeug am Beispiel einer Demonstrationsfr{\"a}sanlage
- Anwendung von Methode und Werkzeug bei den Firmen Homag und Nagel
- Erfahrungen der F{\"o}deral-Partner},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=BOOK-2004-01&engl=1}
}
@book {BOOK-1999-01,
editor = {Kerstin Schneider},
title = {{Die Informatik AG - Telekooperation, 2., durchges. Aufl. 1999}},
publisher = {Teubner},
pages = {158},
type = {Book},
month = {January},
year = {1999},
isbn = {3-519-12194-8},
language = {German},
cr-category = {H Information Systems},
department = {University of Stuttgart, Institute of Parallel and Distributed High-Performance Systems, Applications of Parallel and Distributed Systems;
University of Stuttgart, Institut f{\"u}r Parallele und Verteilte H{\"o}chstleistungsrechner},
abstract = {Das Bild der Informatik ist bei Jugendlichen von Ger{\"a}ten und konkreten Systemen
gepr{\"a}gt. Dies schreckt junge Frauen davon ab, sich mit der Informatik zu
befassen oder Informatik zu studieren. Soll dies korrigiert werden, so mu{\ss} man
insbesondere den Sch{\"u}lerinnen ein zutreffendes Bild von den Inhalten und der
Methodik der Informatik vermitteln. Aus diesem Grund werden seit 1977 an der
Fakult{\"a}t Informatik der Universit{\"a}t Stuttgart Veranstaltungen f{\"u}r Sch{\"u}lerinnen
der Oberstufe durchgef{\"u}hrt mit dem Ziel, diese {\"u}ber die Prinzipien,
Vorgehensweisen und {\"u}ber das Studium der Informatik zu informieren. Das
vorliegende Buch beschreibt die Konzepte und Inhalte der Veranstaltung Die
Informatik AG - Telekooperation und enth{\"a}lt Auswertungen und Erfahrungen der
AG. Es eignet sich einerseits als interessante Einf{\"u}hrung in die Grundlagen der
Rechnernetze, das Internet und seine Dienste, insbesondere das WWW und
elektronische Post, und soll andererseits als Vorlage zur Durchf{\"u}hrung
{\"a}hnlicher Projekte im universit{\"a}ren und schulischen Bereich dienen. Zielgruppe
sind Lehrer(innen) der Informatik und solche, die das Internet an Schulen
einf{\"u}hren, Dozent(inn)en an Hochschulen, die {\"a}hnliche Kurse ausrichten und
nat{\"u}rlich Sch{\"u}ler und Sch{\"u}lerinnen.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=BOOK-1999-01&engl=1}
}