@inproceedings {INPROC-2020-06,
   author = {Cornelia Kiefer and Peter Reimann and Bernhard Mitschang},
   title = {{Prevent Low-Quality Analytics by Automatic Selection of the Best-Fitting Training Data}},
   booktitle = {Proceedings of the 53rd Hawaii International Conference on System Sciences (HICSS)},
   address = {Maui, Hawaii, USA},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {1036--1045},
   type = {Conference Paper},
   month = {January},
   year = {2020},
   isbn = {978-0-9981331-3-3},
   keywords = {data quality; domain-specific data analysis; text analysis; text similarity; training data},
   language = {English},
   cr-category = {I.2.7 Natural Language Processing},
   ee = {https://scholarspace.manoa.hawaii.edu/bitstream/10125/63868/0103.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Data analysis pipelines consist of a sequence of various analysis tools. Most of these tools are based on supervised machine learning techniques and thus rely on labeled training data. Selecting appropriate training data has a crucial impact on analytics quality. Yet, most of the time, domain experts who construct analysis pipelines neglect the task of selecting appropriate training data. They rely on default training data sets, e.g., since they do not know which other training data sets exist and what they are used for. However, default training data sets may be very different from the domain-specific input data that is to be analyzed, leading to low-quality results. Moreover, these input data sets are usually unlabeled. Thus, information on analytics quality is not measurable with evaluation metrics. Our contribution comprises a method that (1) indicates the expected quality to the domain expert while constructing the analysis pipeline, without the need for labels, and (2) automatically selects the best-fitting training data. It is based on a measurement of the similarity between input and training data. In our evaluation, we consider a part-of-speech tagger and show that Latent Semantic Analysis (LSA) and Cosine Similarity are suited as indicators for the quality of analysis results and as a basis for an automatic selection of the best-fitting training data.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2020-06&engl=0}
}
@inproceedings {INPROC-2019-08,
   author = {Cornelia Kiefer and Peter Reimann and Bernhard Mitschang},
   title = {{A Hybrid Information Extraction Approach Exploiting Structured Data Within a Text Mining Process}},
   booktitle = {18. Fachtagung des GI-Fachbereichs ,,Datenbanken und Informationssysteme`` (DBIS), 4.--8. M{\"a}rz 2019, Rostock, Germany, Proceedings},
   editor = {Torsten Grust and Felix Naumann and Alexander B{\"o}hm and Wolfgang Lehner and Theo H{\"a}rder and Erhard Rahm and others},
   address = {Bonn},
   publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {149--168},
   type = {Conference Paper},
   month = {March},
   year = {2019},
   keywords = {information extraction; clustering; text mining; free text fields},
   language = {English},
   cr-category = {I.2.7 Natural Language Processing},
   ee = {https://doi.org/10.18420/btw2019-10},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Many data sets encompass structured data fields with embedded free text fields. The text fields allow customers and workers to input information which cannot be encoded in structured fields. Several approaches use structured and unstructured data in isolated analyses. The result of isolated mining of structured data fields misses crucial information encoded in free text. The result of isolated text mining often mainly repeats information already available from structured data. The actual information gain of isolated text mining is thus limited. The main drawback of both isolated approaches is that they may miss crucial information. The hybrid information extraction approach suggested in this paper addresses this issue. Instead of extracting information that in large part was already available beforehand, it extracts new, valuable information from free texts. Our solution exploits results of analyzing structured data within the text mining process, i.e., structured information guides and improves the information extraction process on textual data. Our main contributions comprise the description of the concept of hybrid information extraction as well as a prototypical implementation and an evaluation with two real-world data sets from aftersales and production with English and German free text fields.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-08&engl=0}
}
@inproceedings {INPROC-2019-07,
   author = {Cornelia Kiefer},
   title = {{Quality Indicators for Text Data}},
   booktitle = {18. Fachtagung des GI-Fachbereichs ,,Datenbanken und Informationssysteme`` (DBIS), 4.--8. M{\"a}rz 2019, Rostock, Germany, Workshopband},
   editor = {Holger Meyer and Norbert Ritter and Andreas Thor and Daniela Nicklas and Andreas Heuer and Meike Klettke},
   address = {Bonn},
   publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Lecture Notes in Informatics (LNI)},
   pages = {145--154},
   type = {Conference Paper},
   month = {March},
   year = {2019},
   keywords = {data quality; text data quality; text mining; text analysis; quality indicators for text data},
   language = {English},
   cr-category = {I.2.7 Natural Language Processing},
   ee = {https://doi.org/10.18420/btw2019-ws-15},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Textual data sets vary in terms of quality. They have different characteristics such as the average sentence length or the number of spelling mistakes and abbreviations. These text characteristics influence the quality of text mining results. They may be measured automatically by means of quality indicators. We present indicators that we implemented based on natural language processing libraries such as Stanford CoreNLP and NLTK. We discuss design decisions in the implementation of exemplary indicators and provide all indicators on GitHub. In the evaluation, we investigate free texts from production, news, prose, tweets and chat data and show that the suggested indicators predict the quality of two text mining modules.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2019-07&engl=0}
}
@inproceedings {INPROC-2017-18,
   author = {Cornelia Kiefer},
   title = {{Die Gratwanderung zwischen qualitativ hochwertigen und einfach zu erstellenden dom{\"a}nenspezifischen Textanalysen}},
   booktitle = {GI-Edition Lecture Notes in Informatics Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2017) Workshopband},
   editor = {Bernhard Mitschang and others},
   address = {Bonn},
   publisher = {Gesellschaft f{\"u}r Informatik},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {99--104},
   type = {Workshop Paper},
   month = {March},
   year = {2017},
   keywords = {text analysis, data quality, analysis quality, supervised machine learning, text analysis in the humanities},
   language = {German},
   cr-category = {H.3 Information Storage and Retrieval},
   ee = {http://btw2017.informatik.uni-stuttgart.de/pro/P-266-BTW2017-Workshopband.pdf,     http://btw2017.informatik.uni-stuttgart.de/slidesandpapers/E1-12/paper_web.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Text analysis has become a crucial tool in various domains such as the humanities, the natural sciences and industry. One of the biggest challenges in domain-specific text analysis projects is to bring together knowledge from IT and text mining with knowledge from the domain. Many text analysis toolkits are therefore simplified specifically for use by domain experts with little or no IT and text analysis knowledge. In this paper, we discuss to what extent these simplifications can lead to quality problems when analyzing noisy data.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2017-18&engl=0}
}
@inproceedings {INPROC-2016-29,
   author = {Cornelia Kiefer},
   title = {{Assessing the Quality of Unstructured Data: An Initial Overview}},
   booktitle = {Proceedings of the LWDA 2016 (LWDA)},
   editor = {Ralf Krestel and Davide Mottin and Emmanuel M{\"u}ller},
   address = {Aachen},
   publisher = {CEUR Workshop Proceedings},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {62--73},
   type = {Conference Paper},
   month = {September},
   year = {2016},
   issn = {1613-0073},
   keywords = {quality of unstructured data, quality of text data, data quality dimensions, data quality assessment, data quality metrics},
   language = {English},
   cr-category = {A.1 General Literature, Introductory and Survey,     I.2.7 Natural Language Processing},
   ee = {http://ceur-ws.org/Vol-1670/paper-25.pdf,     http://ceur-ws.org/Vol-1670/},
   contact = {cornelia.kiefer@gsame.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {In contrast to structured data, unstructured data such as texts, speech, videos and pictures do not come with a data model that enables a computer to use them directly. Nowadays, computers can interpret the knowledge encoded in unstructured data using methods from text analytics, image recognition and speech recognition. Therefore, unstructured data are used increasingly in decision-making processes. But although decisions are commonly based on unstructured data, data quality assessment methods for unstructured data are lacking. We consider data analysis pipelines built upon two types of data consumers: human consumers, who usually come at the end of the pipeline, and non-human / machine consumers (e.g., natural language processing modules such as part-of-speech taggers and named entity recognizers), which mainly operate at intermediate stages. We define data quality of unstructured data via (1) the similarity of the input data to the data expected by these consumers of unstructured data and via (2) the similarity of the input data to the data representing the real world. We deduce data quality dimensions from the elements in analytic pipelines for unstructured data and characterize them. Finally, we propose automatically measurable indicators for assessing the quality of unstructured text data and give hints towards an implementation.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-29&engl=0}
}
@inproceedings {INPROC-2016-07,
   author = {Christoph Gr{\"o}ger and Laura Kassner and Eva Hoos and Jan K{\"o}nigsberger and Cornelia Kiefer and Stefan Silcher and Bernhard Mitschang},
   title = {{The Data-Driven Factory. Leveraging Big Industrial Data for Agile, Learning and Human-Centric Manufacturing}},
   booktitle = {Proceedings of the 18th International Conference on Enterprise Information Systems},
   editor = {Slimane Hammoudi and Leszek Maciaszek and Michele M. Missikoff and Olivier Camp and Jose Cordeiro},
   publisher = {SciTePress},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {40--52},
   type = {Conference Paper},
   month = {April},
   year = {2016},
   isbn = {978-989-758-187-8},
   keywords = {IT Architecture, Data Analytics, Big Data, Smart Manufacturing, Industrie 4.0},
   language = {English},
   cr-category = {H.4.0 Information Systems Applications General,     J.2 Physical Sciences and Engineering},
   contact = {Email to Christoph.Groeger@ipvs.uni-stuttgart.de or laura.kassner@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Global competition in the manufacturing industry is characterized by ever shorter product life cycles, increasing complexity and a turbulent environment. High product quality, continuously improved processes as well as changeable organizational structures constitute central success factors for manufacturing companies. With the rise of the internet of things and Industrie 4.0, the increasing use of cyber-physical systems as well as the digitalization of manufacturing operations lead to massive amounts of heterogeneous industrial data across the product life cycle. In order to leverage these big industrial data for competitive advantages, we present the concept of the data-driven factory. The data-driven factory enables agile, learning and human-centric manufacturing and makes use of a novel IT architecture, the Stuttgart IT Architecture for Manufacturing (SITAM), overcoming the insufficiencies of the traditional information pyramid of manufacturing. We introduce the SITAM architecture and discuss its conceptual components with respect to service-oriented integration, advanced analytics and mobile information provisioning in manufacturing. Moreover, for evaluation purposes, we present a prototypical implementation of the SITAM architecture as well as a real-world application scenario from the automotive industry to demonstrate the benefits of the data-driven factory.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-07&engl=0}
}
@inproceedings {INPROC-2015-52,
   author = {Ulrike Pado and Cornelia Kiefer},
   title = {{Short Answer Grading: When Sorting Helps and When it Doesn’t}},
   booktitle = {Proceedings of the 4th workshop on NLP for Computer Assisted Language Learning, NODALIDA 2015},
   editor = {Link{\"o}pings universitet Link{\"o}ping University Electronic Press},
   address = {Vilnius},
   publisher = {LiU Electronic Press and ACL Anthology},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   series = {Link{\"o}ping Electronic Conference Proceedings},
   pages = {42--50},
   type = {Workshop Paper},
   month = {May},
   year = {2015},
   isbn = {978-91-7519-036-5},
   keywords = {short-answer grading; assisted grading; short-answer corpora},
   language = {English},
   cr-category = {J Computer Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Automatic short-answer grading promises improved student feedback at reduced teacher effort both during and after instruction. Automated grading is, however, controversial in high-stakes testing and complex systems can be difficult to set up by non-experts, especially for frequently changing questions. We propose a versatile, domain-independent system that assists manual grading by pre-sorting answers according to their similarity to a reference answer. We show near state-of-the-art performance on the task of automatically grading the answers from CREG (Meurers et al., 2011). To evaluate the grader assistance task, we present CSSAG (Computer Science Short Answers in German), a new corpus of German computer science questions answered by natives and highly-proficient non-natives. On this corpus, we demonstrate the positive influence of answer sorting on the slowest-graded, most complex-to-assess questions.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-52&engl=0}
}
@inproceedings {INPROC-2015-45,
   author = {Laura Kassner and Cornelia Kiefer},
   title = {{Taxonomy Transfer: Adapting a Knowledge Representing Resource to new Domains and Tasks}},
   booktitle = {Proceedings of the 16th European Conference on Knowledge Management},
   publisher = {Academic Conferences and Publishing International (ACPI)},
   institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
   pages = {399--407},
   type = {Conference Paper},
   month = {September},
   year = {2015},
   keywords = {taxonomy; ontology; ontology population; semantic resources; domain-specific language},
   language = {English},
   cr-category = {I.2.7 Natural Language Processing,     I.2.4 Knowledge Representation Formalisms and Methods,     J.7 Computers in Other Systems},
   contact = {Email to laura.kassner@ipvs.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Today, data from different sources and different phases of the product life cycle are usually analyzed in isolation and with considerable time delay. Real-time integrated analytics is especially beneficial in a production context. We present an architecture for data- and analytics-driven exception escalation in manufacturing and show the advantages of integrating unstructured data.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2015-45&engl=0}
}
@article {ART-2019-10,
   author = {Cornelia Kiefer and Peter Reimann and Bernhard Mitschang},
   title = {{QUALM: Ganzheitliche Messung und Verbesserung der Datenqualit{\"a}t in der Textanalyse}},
   journal = {Datenbank-Spektrum},
   publisher = {Springer Verlag},
   pages = {1--12},
   type = {Journal Article},
   month = {June},
   year = {2019},
   doi = {10.1007/s13222-019-00318-7},
   keywords = {data quality; text analysis; text mining; training data; semantic resources},
   language = {German},
   cr-category = {H.3 Information Storage and Retrieval},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {Existing approaches to measuring and improving the quality of text data in text analysis come with three major drawbacks. Evaluation metrics such as accuracy measure quality reliably, but they (1) depend on gold annotations that have to be created manually at great effort and (2) offer no starting points for improving quality. First domain-specific data quality methods for unstructured text data do work without gold annotations and offer starting points for improving data quality. However, these methods were developed only for limited application areas and (3) therefore do not consider the specifics of many analysis tools in text analysis processes. In this work, we present the QUALM concept for high-quality mining of text data (QUALity Mining), which addresses the three drawbacks mentioned above. The goal of QUALM is to increase the quality of analysis results, e.g., with respect to the accuracy of a text classification, based on measuring and improving data quality. To this end, QUALM provides a set of QUALM data quality methods. QUALM indicators capture data quality holistically, based on the fit between the input data and the specifics of the analysis tools, such as the features, training data and semantic resources used (for example, dictionaries or taxonomies). Each indicator comes with a matching modifier that can be used to change both the data and the specifics of the analysis tools in order to increase data quality. In a first evaluation of QUALM, we show for concrete analysis tools and data sets that applying the QUALM data quality methods also goes along with an increase in the quality of the analysis results in terms of the evaluation metric accuracy. To this end, the fit between input data and the specifics of the analysis tools is increased with concrete QUALM modifiers that, for example, resolve abbreviations or automatically suggest suitable training data based on text similarity metrics.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2019-10&engl=0}
}
@article {ART-2015-10,
   author = {Cornelia Kiefer and Ulrike Pado},
   title = {{Freitextaufgaben in Online-Tests – Bewertung und Bewertungsunterst{\"u}tzung}},
   journal = {HMD Praxis der Wirtschaftsinformatik},
   publisher = {Springer},
   volume = {52},
   number = {1},
   pages = {96--107},
   type = {Journal Article},
   month = {January},
   year = {2015},
   doi = {10.1365/s40702-014-0104-2},
   language = {German},
   cr-category = {J Computer Applications},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The use of eLearning scenarios offers many innovative possibilities for knowledge transfer. Dedicated eLearning tools serve to provide and combine learning resources, interactive elements, and means of interaction and communication. This enables self-directed, asynchronous learning, opens up new methodological avenues, and can reduce the high effort involved in teaching large groups of learners. In this context, the question arises what benefits the computer-supported implementation of assessments of learning progress (tests and exams) can have for instructors and learners. Strongly associated with tests in eLearning are multiple-choice questions. As automatically gradable questions, they can be evaluated quickly and objectively in an eLearning environment and provide fast feedback to learners and instructors even with large numbers of participants. At the same time, many instructors doubt that this question format really reflects the required knowledge and skills, and they fear unjustified success through guessing. Free-text questions avoid these problems and offer the added value of a clearer insight into the examinee's way of thinking, but their grading is time-consuming and often subjective. We give practical hints that help to improve and speed up the grading of free-text tasks, and we illustrate our considerations with a real data set of free-text questions and answers collected during an introductory programming course for computer science and business information systems students. Finally, we present our ongoing work on a system for semi-automatic grading assistance that benefits from the computer-based eLearning environment and is intended to further optimize both the time required for manual grading and the replicability of the grades.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2015-10&engl=0}
}
@inbook {INBOOK-2017-04,
   author = {Laura Kassner and Christoph Gr{\"o}ger and Jan K{\"o}nigsberger and Eva Hoos and Cornelia Kiefer and Christian Weber and Stefan Silcher and Bernhard Mitschang},
   title = {{The Stuttgart IT Architecture for Manufacturing}},
   booktitle = {Enterprise Information Systems: 18th International Conference, ICEIS 2016, Rome, Italy, April 25--28, 2016, Revised Selected Papers},
   publisher = {Springer International Publishing},
   series = {Lecture Notes in Business Information Processing},
   volume = {291},
   pages = {53--80},
   type = {Book Chapter},
   month = {June},
   year = {2017},
   isbn = {978-3-319-62386-3},
   doi = {10.1007/978-3-319-62386-3_3},
   language = {English},
   cr-category = {H.4.0 Information Systems Applications General,     D.2.12 Software Engineering Interoperability,     J.2 Physical Sciences and Engineering},
   ee = {https://link.springer.com/chapter/10.1007/978-3-319-62386-3_3},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Anwendersoftware},
   abstract = {The global conditions for manufacturing are rapidly changing towards shorter product life cycles, more complexity and more turbulence. The manufacturing industry must meet the demands of this shifting environment and the increased global competition by ensuring high product quality, continuous improvement of processes and increasingly flexible organization. Technological developments towards smart manufacturing create big industrial data which needs to be leveraged for competitive advantages. We present a novel IT architecture for data-driven manufacturing, the Stuttgart IT Architecture for Manufacturing (SITAM). It addresses the weaknesses of traditional manufacturing IT by providing IT systems integration, holistic data analytics and mobile information provisioning. The SITAM surpasses competing reference architectures for smart manufacturing because it has a strong focus on analytics and mobile integration of human workers into the smart production environment and because it includes concrete recommendations for technologies to implement it, thus filling a granularity gap between conceptual and case-based architectures. To illustrate the benefits of the SITAM's prototypical implementation, we present an application scenario for value-added services in the automotive industry.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2017-04&engl=0}
}