Publications VS: Bibliography 2016, BibTeX
@inproceedings {INPROC-2016-41,
author = {Stephan Schnitzer and Simon Gansel and Frank D{\"u}rr and Kurt Rothermel},
title = {{Real-time scheduling for 3D GPU rendering}},
booktitle = {11th IEEE International Symposium on Industrial Embedded Systems (SIES)},
publisher = {IEEE},
institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
pages = {1--10},
type = {Konferenz-Beitrag},
month = {Mai},
year = {2016},
doi = {10.1109/SIES.2016.7509411},
keywords = {automobiles; graphical user interfaces; graphics processing units; rendering (computer graphics); 3D GPU rendering; GPU sharing; real-time GPU scheduling framework; real-time scheduling; automotive HMI; embedded systems},
language = {Englisch},
cr-category = {H.5.2 Information Interfaces and Presentation User Interfaces,
I.3.m Computer Graphics Miscellaneous},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2016-41/INPROC-2016-41.pdf,
http://dx.doi.org/10.1109/SIES.2016.7509411},
department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Verteilte Systeme},
abstract = {3D graphical functions in cars enjoy growing popularity. For instance, analog
instruments of the instrument cluster are replaced by digital 3D displays as
shown by Mercedes-Benz in the F125 prototype car. The trend to use 3D
applications expands in two directions: towards more safety-relevant
applications such as the speedometer and towards third-party applications,
e.g., from an app store. In order to save cost, energy, and installation space,
all these applications should share a single GPU. GPU sharing brings up the
problem of providing real-time guarantees for rendering content of
time-sensitive applications like the speedometer. To solve this problem, we
present a real-time GPU scheduling framework which provides strong guarantees
for critical applications while still giving as much GPU resources to less
important applications as possible, thus ensuring a high GPU utilization. Since
current GPUs are not preemptible, we use the estimated execution time of each
GPU rendering job to make the scheduling decisions. Our evaluations show that
our scheduler guarantees given real-time constraints, while achieving a high
GPU utilization of 97\%. Moreover, scheduling is performed highly efficiently in
real time, with less than 10 us latency.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-41&engl=0}
}
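Note: the abstract of INPROC-2016-41 above hinges on one point, namely that current GPUs are not preemptible, so scheduling decisions must rely on the estimated execution time of each rendering job. The following Python sketch illustrates that kind of decision logic under stated assumptions; the job fields (deadline, est_time) and the slack check are hypothetical and not taken from the paper.

    def pick_next_job(critical_jobs, best_effort_jobs, now):
        # critical_jobs: jobs of time-sensitive applications, sorted by deadline.
        # Each job is assumed to carry .deadline and .est_time (estimated GPU time).
        # A best-effort job is dispatched only if its estimated execution time fits
        # into the slack before the most urgent critical job must start, because a
        # GPU job cannot be preempted once dispatched.
        if critical_jobs:
            crit = critical_jobs[0]
            slack = (crit.deadline - crit.est_time) - now
            for i, job in enumerate(best_effort_jobs):
                if job.est_time <= slack:
                    return best_effort_jobs.pop(i)   # fill the slack, keep GPU utilization high
            return critical_jobs.pop(0)              # nothing fits: run the critical job now
        return best_effort_jobs.pop(0) if best_effort_jobs else None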
@inproceedings {INPROC-2016-34,
author = {Florian Berg and Frank D{\"u}rr and Kurt Rothermel},
title = {{Increasing the Efficiency of Code Offloading in n-tier Environments with Code Bubbling}},
booktitle = {Proceedings of the 13th Annual International Conference on Mobile and Ubiquitous Systems: Computing, Networking and Services},
publisher = {-},
institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
type = {Konferenz-Beitrag},
month = {November},
year = {2016},
isbn = {978-1-4503-4750-1},
doi = {10.1145/2994374.2994375},
keywords = {Mobile Cloud Computing; Multi-tier Code Offloading; Cyber Foraging; Code Bubbling; n-tier environment},
language = {Englisch},
cr-category = {C.2.4 Distributed Systems},
contact = {Florian.Berg@ipvs.uni-stuttgart.de},
department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Verteilte Systeme},
abstract = {Code offloading strives for increasing the energy efficiency and execution
speed of mobile applications on resource-constrained mobile devices. First
approaches considered only code offloading between two (or three) tiers,
executing code either locally on the mobile device or remotely on a powerful
server in the vicinity or in a distant cloud. However, new execution
environments comprise multiple tiers, containing highly distributed
heterogeneous resources. We present in this paper our Code Bubbling Offload
System (CoBOS). CoBOS targets n-tier environments containing highly distributed
heterogeneous resources with different performance characteristics and cost
implications. In such n-tier environments, it is very costly for a
resource-constrained mobile device to gather a global view on available
resources. As a result, we propose the novel concept of code bubbling. Code
bubbling moves code dynamically and adaptively towards more powerful and more
distant tiers, enabling an efficient and scalable code offloading in n-tier
environments. Each tier makes autonomous decisions to execute code in the tier
or forward it further to the next tier. To support such a recursive escalation
of code along autonomous tiers, CoBOS offloads self-contained offload requests
that possess all of the required information for processing. Our real-world
evaluation shows that CoBOS decreases the energy consumption by 77\% and the
execution time by 83\% for code offloading in n-tier environments.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-34&engl=0}
}
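Note: the core mechanism described in the INPROC-2016-34 abstract, recursive escalation of self-contained offload requests along autonomous tiers, can be sketched as follows. This is an illustrative reconstruction; the method names (can_execute, current_load, next_tier, send) are assumptions, not CoBOS APIs.

    def handle_offload_request(tier, request):
        # The request is assumed to be self-contained: it carries the code, its
        # input state, and all metadata needed to execute or forward it.
        # Each tier decides with purely local knowledge whether to execute here
        # or let the request "bubble up" to the next, more powerful and more
        # distant tier.
        if tier.can_execute(request) and tier.current_load() < tier.load_threshold:
            return tier.execute(request)          # result travels back down to the device
        if tier.next_tier is not None:
            return tier.next_tier.send(request)   # recursive escalation to the next tier
        return tier.execute(request)              # topmost tier (e.g., distant cloud) must execute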
@inproceedings {INPROC-2016-32,
author = {Frank D{\"u}rr and Naresh Ganesh Nayak},
title = {{No-wait Packet Scheduling for IEEE Time-sensitive Networks (TSN)}},
booktitle = {24th International Conference on Real-Time Networks and Systems, RTNS-2016},
address = {Brest, France},
publisher = {ACM},
institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
pages = {1--10},
type = {Konferenz-Beitrag},
month = {Oktober},
year = {2016},
keywords = {Time-sensitive network, TSN, Real-time communication, Job shop scheduling problem, Tabu search, IEEE 802.1Qbv},
language = {Englisch},
cr-category = {C.2.1 Network Architecture and Design,
C.2.3 Network Operations},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2016-32/INPROC-2016-32.pdf},
department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Verteilte Systeme},
abstract = {The IEEE Time-sensitive Networking (TSN) Task Group has recently standardized
enhancements for IEEE 802.3 networks, enabling them to transport
time-triggered traffic (aka scheduled traffic) with stringent
bounds on network delay and jitter while also transporting best-effort traffic.
These enhancements primarily include dedicating one queue per port of the
switch for scheduled traffic along with a programmable gating mechanism that
dictates which of the queues are to be considered for transmission. While the
IEEE 802.1Qbv standard defines these mechanisms to handle scheduled traffic, it
stops short of specifying algorithms to compute fine-grained link schedules for
the streams of scheduled traffic. Further, the mechanisms in TSN require the
creation of so-called guard bands to isolate scheduled traffic from
best-effort traffic. These guard bands may result in bandwidth
wastage, and hence schedules with a lower number of guard bands are preferred. In
this paper, we introduce the No-wait Packet Scheduling Problem (NWPSP) for
modelling the scheduling in IEEE Time-sensitive Networks and map it to the
No-wait Job-shop Scheduling Problem (NW-JSP), a well-known problem from the
field of operational research. In particular, we present a Tabu search
algorithm for efficiently computing schedules and a schedule compression
technique to reduce the number of guard bands in a schedule. Our evaluations show
that our Tabu search algorithm can compute near-optimal schedules for over 1500
flows and that the subsequent schedule compression reduces the number of guard bands
by 24\% on average.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-32&engl=0}
}
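Note: to make the mapping claimed in the INPROC-2016-32 abstract more tangible, here is a rough no-wait job-shop style formulation (illustrative notation, not the paper's exact model). Let s_{f,i} be the transmission start time of flow f on its i-th hop and t_{f,i} the corresponding transmission duration. The no-wait property chains the hops of a flow without any queuing,

    s_{f,i+1} = s_{f,i} + t_{f,i} + \delta_i   for each hop i (\delta_i: link/processing delay),

and any two flows f \ne g that share a link l must not overlap on it:

    s_{f,l} + t_{f,l} \le s_{g,l}   or   s_{g,l} + t_{g,l} \le s_{f,l}.

Loosely speaking, a guard band is needed wherever a scheduled transmission window borders best-effort traffic, so compressing the schedule, i.e., packing scheduled windows on a link back to back, reduces the number of guard bands that have to be inserted.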
@inproceedings {INPROC-2016-31,
author = {Naresh Ganesh Nayak and Frank D{\"u}rr and Kurt Rothermel},
title = {{Time-sensitive Software-defined Network (TSSDN) for Real-time Applications}},
booktitle = {Proceedings of the 24th International Conference on Real-Time Networks and Systems, RTNS 2016},
address = {Brest, France},
publisher = {ACM},
institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
pages = {1--10},
type = {Konferenz-Beitrag},
month = {Oktober},
year = {2016},
language = {Englisch},
cr-category = {C.2.1 Network Architecture and Design,
C.2.3 Network Operations},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2016-31/INPROC-2016-31.pdf},
department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Verteilte Systeme},
abstract = {Cyber-physical systems (CPS), like the ones used in industrial automation
systems, are highly time-sensitive applications demanding zero packet losses
along with stringent real-time guarantees like bounded latency and jitter from
the underlying network for communication. With the proliferation of IEEE 802.3
and IP networks, there is a desire to use these networks instead of the
currently used fieldbuses for time-sensitive applications. However, these
networking technologies, which originally were designed to provide best effort
communication services, lack mechanisms for providing real-time guarantees. In
this paper, we present Time-Sensitive Software-Defined Networks (TSSDN), which
provide real-time guarantees for the time-triggered traffic in time-sensitive
systems while also transporting non-time-sensitive traffic. TSSDN provides
these guarantees by bounding the non-deterministic queuing delays for
time-sensitive traffic. To this end, it exploits the logical centralization
paradigm of software-defined networking to compute a transmission schedule for
time-sensitive traffic initiated by the end systems based on a global view. In
particular, we present various Integer Linear Program (ILP) formulations that
solve the combined problem of routing and scheduling time-triggered traffic.
Moreover, we show that end systems can comply with a given schedule with high
precision using user-space packet processing frameworks. Our evaluations show
that TSSDN has deterministic end-to-end delays ($\leq$ 14 us on our benchmark
topology) with low and bounded jitter ($\leq$ 7 us).},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-31&engl=0}
}
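Note: as a hedged sketch of what a combined routing-and-scheduling ILP of the kind mentioned in the INPROC-2016-31 abstract can look like (variables and constraints are illustrative, not the paper's formulation): introduce binaries x_{f,p} (flow f uses candidate path p) and y_{f,s} (flow f transmits in base-period slot s), with

    \sum_{p} x_{f,p} = 1   and   \sum_{s} y_{f,s} = 1   for every flow f,

and require that each link l carries at most one scheduled transmission per slot s:

    \sum_{f} \sum_{p \ni l} x_{f,p} \, y_{f,s} \le 1.

The product of binaries can be linearized with an auxiliary variable z_{f,p,s} (z \le x_{f,p}, z \le y_{f,s}, z \ge x_{f,p} + y_{f,s} - 1), keeping the program linear. Because no two scheduled flows then ever meet at a queue, the non-deterministic queuing delay the abstract refers to is eliminated by construction.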
@inproceedings {INPROC-2016-18,
author = {Sukanya Bhowmik and Muhammad Adnan Tariq and Jonas Grunert and Kurt Rothermel},
title = {{Bandwidth-Efficient Content-Based Routing on Software-Defined Networks}},
booktitle = {Proceedings of the ACM International Conference on Distributed and Event-based Systems (DEBS 2016); Irvine, California, US, June 20-24, 2016},
publisher = {IEEE},
institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
pages = {1--8},
type = {Konferenz-Beitrag},
month = {Juni},
year = {2016},
language = {Englisch},
cr-category = {C.2.1 Network Architecture and Design,
C.2.4 Distributed Systems},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2016-18/INPROC-2016-18.pdf},
department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Verteilte Systeme},
abstract = {With the vision of Internet of Things gaining popularity at a global level,
efficient publish/subscribe middleware for communication within and across
datacenters is extremely desirable. In this respect, the very popular
Software-defined Networking (SDN), which enables publish/subscribe middleware
to perform line-rate filtering of events directly on hardware, can prove to be
very useful. While deploying content filters directly on switches of a
software-defined network allows optimized paths, high throughput rates, and low
end-to-end latency, it suffers from certain inherent limitations w.r.t. the number of
bits available on hardware switches to represent these filters. Such a
limitation affects expressiveness of filters, resulting in unnecessary traffic
in the network.
In this paper, we explore various techniques to represent content filters
expressively while being limited by hardware. We implement and evaluate
techniques that i) use workload, in terms of events and subscriptions, to
represent content, and ii) efficiently select attributes to reduce redundancy
in content. Moreover, these techniques complement each other and can be
combined to further enhance performance. Our detailed performance
evaluations show the potential of these techniques in reducing unnecessary
traffic when subjected to different workloads.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-18&engl=0}
}
@inproceedings {INPROC-2016-17,
author = {Ruben Mayer and Christian Mayer and Muhammad Adnan Tariq and Kurt Rothermel},
title = {{GraphCEP - Real-time Data Analytics Using Parallel Complex Event and Graph Processing}},
booktitle = {Proceedings of the 10th ACM International Conference on Distributed Event-Based Systems, DEBS'16},
publisher = {ACM},
institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
pages = {1--8},
type = {Konferenz-Beitrag},
month = {Juni},
year = {2016},
doi = {10.1145/2933267.2933509},
language = {Englisch},
cr-category = {C.2.4 Distributed Systems},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2016-17/INPROC-2016-17.pdf},
department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Verteilte Systeme},
abstract = {In recent years, the proliferation of highly dynamic graph-structured data
streams fueled the demand for real-time data analytics. For instance, detecting
recent trends in social networks enables new applications in areas such as
disaster detection, business analytics or health-care. Parallel Complex Event
Processing has evolved as the paradigm of choice to analyze data streams in a
timely manner, where the incoming data streams are split and processed
independently by parallel operator instances. However, the degree of
parallelism is limited by the feasibility of splitting the data streams into
independent parts such that correctness of event processing is still ensured.
In this paper, we overcome this limitation for graph-structured data by further
parallelizing individual operator instances using modern graph processing
systems. These systems partition the graph data and execute graph algorithms in
a highly parallel fashion, for instance using cloud resources. To this end, we
propose a novel graph-based Complex Event Processing system GraphCEP and
evaluate its performance in the setting of two case studies from the DEBS Grand
Challenge 2016.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-17&engl=0}
}
@inproceedings {INPROC-2016-16,
author = {Sukanya Bhowmik and Muhammad Adnan Tariq and Lobna Hegazy and Kurt Rothermel},
title = {{Hybrid Content-based Routing Using Network and Application Layer Filtering}},
booktitle = {Proceedings of the 36th IEEE International Conference on Distributed Computing Systems (ICDCS 2016), Nara, Japan, June 27-30, 2016},
publisher = {IEEE},
institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
type = {Konferenz-Beitrag},
month = {Juni},
year = {2016},
keywords = {Content-based Routing; Publish/Subscribe; Software-defined Networking; Network Virtualization; Hybrid Routing},
language = {Englisch},
cr-category = {C.2.1 Network Architecture and Design,
C.2.4 Distributed Systems},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2016-16/INPROC-2016-16.pdf},
department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Verteilte Systeme},
abstract = {Over the past few decades, content-based publish/subscribe has been primarily
implemented as an overlay network of software brokers. Even though such systems
provide the possibility of bandwidth efficient expressive filtering in
software, they cannot match up to the performance (in terms of end-to-end
latency and throughput) of communication protocols implemented on the network
layer. To exploit network layer performance benefits, recently, content-based
publish/subscribe was realized using the capabilities of Software-defined
Networking (SDN). While SDN allows line-rate forwarding of events by content
filters directly installed on switches, it suffers from inherent hardware
limitations (w.r.t. flow table size, limited availability of bits in header
fields) that adversely impact expressiveness of these filters, resulting in
unnecessary network traffic.
In this paper, we strike a balance between purely application-layer-based and
purely network-layer-based publish/subscribe implementations by realizing the
first hybrid content-based middleware that enables filtering of events in both
layers. Moreover, we provide different selection algorithms with varying
degrees of complexity to determine the events to be filtered at each layer such
that unnecessary network traffic can be minimized while also considering delay
requirements of the middleware. Our hybrid middleware offers full flexibility
to configure it according to the performance requirements of the system. We
provide a detailed performance evaluation of the proposed selection algorithms
to determine their impact on the performance of the designed hybrid middleware
which we further compare to state-of-the-art solutions.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-16&engl=0}
}
@inproceedings {INPROC-2016-15,
author = {David Richard Sch{\"a}fer and Andreas Wei{\ss} and Muhammad Adnan Tariq and Vasilios Andrikopoulos and Santiago G{\'o}mez S{\'a}ez and Lukas Krawczyk and Kurt Rothermel},
title = {{HAWKS: A System for Highly Available Executions of Workflows}},
booktitle = {Proceedings of the 13th IEEE International Conference on Services Computing: SCC'16; San Francisco, California, USA, June 27-July 2, 2016},
publisher = {IEEE},
institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
pages = {130--137},
type = {Konferenz-Beitrag},
month = {Juni},
year = {2016},
doi = {10.1109/SCC.2016.24},
keywords = {SOA; workflows; availability; replication; performance},
language = {Englisch},
cr-category = {C.2.4 Distributed Systems,
C.4 Performance of Systems},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2016-15/INPROC-2016-15.pdf,
http://dx.doi.org/10.1109/SCC.2016.24},
department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Architektur von Anwendungssystemen;
Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Verteilte Systeme},
abstract = {Workflow technology is the de facto standard for managing business
processes. Today, workflows are even used for automating interactions and
collaborations between business partners, e.g., for enabling just-in-time
production. Every workflow that is part of such a collaboration needs to be
highly available. Otherwise, the business operations, e.g., the production,
might be hindered or even stopped. Since today's business partners are
scattered across the globe, the workflows are executed in a highly distributed
and heterogeneous environment. Those environments are, however, failure-prone
and, thus, providing availability is not trivial. In this work, we improve
availability by replicating workflow executions, while ensuring that the
outcome is the same as in a non-replicated execution. For making workflow
replication easily usable with current workflow technology, we derive the
requirements for modeling a workflow replication system. Then, we propose the
HAWKS system, which adheres to the previously specified requirements and is
compatible with current technology. We implement a proof-of-concept in the
open-source workflow execution engine Apache ODE for demonstrating this
compatibility. Finally, we extensively evaluate the impact of using HAWKS in
terms of performance and availability in the presence of failures.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-15&engl=0}
}
@inproceedings {INPROC-2016-13,
author = {Christian Mayer and Muhammad Adnan Tariq and Chen Li and Kurt Rothermel},
title = {{GrapH: Heterogeneity-Aware Graph Computation with Adaptive Partitioning}},
booktitle = {Proceedings of the 2016 IEEE 36th International Conference on Distributed Computing Systems (ICDCS)},
publisher = {IEEE},
institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
pages = {118--128},
type = {Konferenz-Beitrag},
month = {Juni},
year = {2016},
doi = {10.1109/ICDCS.2016.92},
issn = {1063-6927},
keywords = {cloud computing; data analysis; GrapH; GraphX; PowerGraph; Pregel; adaptive edge migration strategy; adaptive partitioning; data access locality; data analytics; diverse vertex traffic; expensive network links; graph vertices; graph-structured data; heterogeneity-aware graph computation; heterogeneous network; specialized graph partitioning algorithms; suboptimal partitioning decisions; vertex-centric graph processing; vertex-cut graph partitioning; Automata; Computational modeling; Data analysis; Distributed databases; Heuristic algorithms; Mirrors; Partitioning algorithms},
language = {Englisch},
cr-category = {C.2.4 Distributed Systems},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2016-13/INPROC-2016-13.pdf,
http://dx.doi.org/10.1109/ICDCS.2016.92},
department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Verteilte Systeme},
abstract = {Vertex-centric graph processing systems such as Pregel, PowerGraph, or GraphX
recently gained popularity due to their superior performance of data analytics
on graph-structured data. These systems exploit the graph structure to improve
data access locality during computation, making use of specialized graph
partitioning algorithms. Recent partitioning techniques assume a uniform and
constant amount of data exchanged between graph vertices (i.e., uniform vertex
traffic) and homogeneous underlying network costs. However, in real-world
scenarios vertex traffic and network costs are heterogeneous. This leads to
suboptimal partitioning decisions and inefficient graph processing. To this
end, we designed GrapH, the first graph processing system using vertex-cut
graph partitioning that considers both diverse vertex traffic and a
heterogeneous network to minimize overall communication costs. The main idea
is to avoid frequent communication over expensive network links using an
adaptive edge migration strategy. Our evaluations show an improvement of 60\% in
communication costs compared to state-of-the-art partitioning approaches.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-13&engl=0}
}
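Note: the optimization goal behind GrapH (INPROC-2016-13 above) can be written down compactly; the notation here is illustrative, not the paper's. With vertex-cut partitioning, a vertex v is replicated on the set of machines M(v) that host its adjacent edges. If t_v denotes the synchronization traffic of vertex v and c_{ij} the cost of the network link between machines i and j, a heterogeneity-aware partitioner roughly minimizes

    \sum_{v} \sum_{\{i,j\} \subseteq M(v), i \ne j} t_v \, c_{ij},

whereas uniform-traffic, homogeneous-network approaches implicitly treat t_v and c_{ij} as constants, which is exactly the assumption the abstract identifies as the source of suboptimal partitioning. Adaptive edge migration then moves edges so that high-traffic vertices avoid replicas connected by expensive links.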
@inproceedings {INPROC-2016-11,
author = {Zohaib Riaz and Frank D{\"u}rr and Kurt Rothermel},
title = {{On the Privacy of Frequently Visited User Locations}},
booktitle = {Proceedings of the Seventeenth International Conference on Mobile Data Management: MDM'16; Porto, Portugal, June 13-16, 2016},
address = {Porto, Portugal},
publisher = {IEEE Computer Society},
institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
pages = {1--10},
type = {Konferenz-Beitrag},
month = {Juni},
year = {2016},
keywords = {Location Privacy, Location-based Applications, Semantic Locations, Visit-Frequency, Frequent locations, Geo-social networking, Location Servers, Non-trusted systems},
language = {Englisch},
cr-category = {K.4.1 Computers and Society Public Policy Issues,
C.2.4 Distributed Systems},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2016-11/INPROC-2016-11.pdf},
contact = {zohaib.riaz@ipvs.uni-stuttgart.de},
department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Verteilte Systeme},
abstract = {With the fast adoption of location-enabled devices, Location-based Applications
(LBAs) have become widely popular. While LBAs enable highly useful concepts
such as geo-social networking, their use also raises serious privacy concerns
as it involves sharing of location data with non-trusted third parties. In this
respect, we propose an approach that protects the frequently visited locations
of users, e.g., a bar, against inferences from longterm monitoring of their
location data. Such inferences equate to a privacy leak as they reveal a user's
personal behavior and interests to possibly malicious non-trusted parties.
To this end, we first present a study of a dataset of location check-ins to
show the existence of this threat among users of LBAs. We then propose our
approach to protect visit-frequency of the users to different locations by
distributing their location data among multiple third-party Location Servers.
This distribution not only serves to avoid a single point of failure for
privacy in our system, it also allows the users to control which LBA accesses
what information about them. We also describe a number of possible attacks
against our privacy approach and evaluate them on real data from the check-ins
dataset. Our results show that our approach can effectively hide the frequent
locations while supporting good quality-of-service for the LBAs.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-11&engl=0}
}
@article {ART-2016-24,
author = {Sukanya Bhowmik and Muhammad Adnan Tariq and Boris Koldehofe and Frank D{\"u}rr and Thomas Kohler and Kurt Rothermel},
title = {{High Performance Publish/Subscribe Middleware in Software-Defined Networks}},
journal = {IEEE/ACM Transactions on Networking},
publisher = {IEEE},
volume = {25},
number = {3},
pages = {1--16},
type = {Artikel in Zeitschrift},
month = {Dezember},
year = {2016},
doi = {10.1109/TNET.2016.2632970},
language = {Englisch},
cr-category = {C.2.1 Network Architecture and Design,
C.2.4 Distributed Systems},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/ART-2016-24/ART-2016-24.pdf},
department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Verteilte Systeme},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-24&engl=0}
}
@article {ART-2016-07,
author = {Thomas Kohler and Frank D{\"u}rr and Kurt Rothermel},
title = {{Consistent Network Management for Software-defined Networking based Multicast}},
journal = {IEEE Transactions on Network and Service Management},
editor = {Rolf Stadler},
publisher = {IEEE Communications Society},
pages = {1--1},
type = {Artikel in Zeitschrift},
month = {September},
year = {2016},
doi = {10.1109/TNSM.2016.2585672},
keywords = {Software-defined networking, configuration management, update consistency, quality-of-service management, multicast communication},
language = {Englisch},
cr-category = {C.2.1 Network Architecture and Design,
C.2.3 Network Operations,
C.2.4 Distributed Systems},
ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/ART-2016-07/ART-2016-07.pdf,
http://dx.doi.org/10.1109/TNSM.2016.2585672},
contact = {thomas.kohler@ipvs.uni-stuttgart.de},
department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Verteilte Systeme},
abstract = {Updating a network is an essential and continual task in the management of
today's softwarized networks. When applying updates on distributed network
elements, desired network properties, such as drop- and loop-freeness, might be
transiently violated. Although crucial, update consistency has so far received
little consideration in network management.
In this article, we argue for incorporating the particularities of update
consistency into the reconfiguration process of continuous network management.
We present a generic management architecture allowing for an appropriate
selection of an update mechanism and its parameters based on expected
inconsistency effects. We investigate update consistency for the case of
multicast routing and show in an extensive analysis why simultaneous drop- and
duplicate-freeness is not possible. We present an update procedure for
multicast routing updates that identifies critical update steps, which are fed
back into the reconfiguration process, along with a lightweight approach that
allows for the selection of an update strategy, preventing either drops or
duplicates. Furthermore, we present an optimization of an existing powerful,
but resource-intensive update approach as well as an approach for in-network
filtering of duplicates.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-07&engl=0}
}