Publikationen SGS: Bibliographie 2016 BibTeX
@inproceedings{INPROC-2016-59,
  author = {Dirk Pfl{\"u}ger and David Pfander},
  title = {{Computational Efficiency vs. Maintainability and Portability. Experiences with the Sparse Grid Code SG++}},
  booktitle = {2016 Fourth International Workshop on Software Engineering for High Performance Computing in Computational Science and Engineering (SE-HPCCSE)},
  address = {Salt Lake City, UT, USA},
  publisher = {IEEE},
  institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
  pages = {17--25},
  type = {Konferenz-Beitrag},
  month = nov,
  year = {2016},
  isbn = {978-1-5090-5224-0},
  keywords = {software maintenance; software quality; computational efficiency; computational maintainability; computational portability; design decisions; sparse grid code SG++; Computational modeling; Hardware; Programming; Software engineering; Software quality; Usability},
  language = {Englisch},
  cr-category = {G.1.0 Numerical Analysis General,
    D.2.3 Software Engineering Coding Tools and Techniques,
    D.2.11 Software Engineering Software Architectures,
    D.2.13 Software Engineering Reusable Software},
  department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-59&engl=0}
}
@inproceedings{INPROC-2016-57,
  author = {Michael Lahnert and Takayuki Aoki and Carsten Burstedde and Miriam Mehl},
  title = {{Minimally-Invasive Integration of P4est in Espresso for Adaptive Lattice-Boltzmann}},
  booktitle = {The 30th Computational Fluid Dynamics Symposium ; Tokyo, Japan, December 12--14, 2016.},
  publisher = {Japan Society of Fluid Mechanics},
  institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
  pages = {1--7},
  type = {Konferenz-Beitrag},
  month = dec,
  year = {2016},
  language = {Englisch},
  cr-category = {I.6 Simulation and Modeling},
  department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-57&engl=0}
}
@inproceedings{INPROC-2016-56,
  author = {David Pfander and Alexander Heinecke and Dirk Pfl{\"u}ger},
  title = {{A New Subspace-Based Algorithm for Efficient Spatially Adaptive Sparse Grid Regression, Classification and Multi-evaluation}},
  booktitle = {Sparse Grids and Applications - Stuttgart 2014},
  editor = {Jochen Garcke and Dirk Pfl{\"u}ger},
  publisher = {Springer International Publishing},
  institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
  series = {Lecture Notes in Computational Science and Engineering},
  volume = {109},
  pages = {221--246},
  type = {Konferenz-Beitrag},
  month = jan,
  year = {2016},
  keywords = {Sparse Grids; Performance Optimization; Adaptivity; High-Performance Computing},
  language = {Englisch},
  cr-category = {G.4 Mathematical Software,
    D.1.3 Concurrent Programming},
  department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-56&engl=0}
}
@inproceedings{INPROC-2016-53,
  author = {Dirk Pfl{\"u}ger and Miriam Mehl and Julian Valentin and Florian Lindner and David Pfander and Stefan Wagner and Daniel Graziotin and Yang Wang},
  title = {{The Scalability-Efficiency/Maintainability-Portability Trade-Off in Simulation Software Engineering: Examples and a Preliminary Systematic Literature Review}},
  booktitle = {Proceedings of 2016 Fourth International Workshop on Software Engineering for High Performance Computing in Computational Science and Engineering (SE-HPCCSE 2016), held in conjunction with SC16, Salt Lake City, Utah},
  publisher = {IEEE Computer Society; ACM},
  institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
  pages = {26--34},
  type = {Workshop-Beitrag},
  month = nov,
  year = {2016},
  doi = {10.1109/SE-HPCCSE.2016.008},
  keywords = {digital simulation; software maintenance; software portability; SLR; SSE; complex software; dynamic construction process; maintainability-portability trade-off; scalability-efficiency trade-off; simulation software engineering; systematic literature review; Computational modeling; Hardware; Mathematical model; Numerical models; Scalability; Software; Software engineering},
  language = {Englisch},
  cr-category = {D.2.0 Software Engineering General},
  ee = {https://dx.doi.org/10.1109/SE-HPCCSE.2016.008},
  department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-53&engl=0}
}
@inproceedings{INPROC-2016-51,
  author = {Mario Heene and Dirk Pfl{\"u}ger},
  title = {{Scalable Algorithms for the Solution of Higher-Dimensional PDEs}},
  booktitle = {Software for Exascale Computing - SPPEXA 2013-2015},
  editor = {Hans-Joachim Bungartz and Philipp Neumann and Wolfgang E. Nagel},
  address = {Cham},
  publisher = {Springer},
  institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
  series = {LNCSE},
  volume = {113},
  pages = {165--186},
  type = {Konferenz-Beitrag},
  month = sep,
  year = {2016},
  doi = {10.1007/978-3-319-40528-5_8},
  language = {Englisch},
  cr-category = {G.4 Mathematical Software},
  department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-51&engl=0}
}
@inproceedings{INPROC-2016-50,
  author = {Markus Hegland and Brendan Harding and Christoph Kowitz and Dirk Pfl{\"u}ger and Peter Strazdins},
  title = {{Recent Developments in the Theory and Application of the Sparse Grid Combination Technique}},
  booktitle = {Software for Exascale Computing - SPPEXA 2013-2015},
  editor = {Hans-Joachim Bungartz and Philipp Neumann and Wolfgang E. Nagel},
  publisher = {Springer International Publishing},
  institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
  pages = {143--163},
  type = {Konferenz-Beitrag},
  month = jan,
  year = {2016},
  isbn = {978-3-319-40528-5},
  doi = {10.1007/978-3-319-40528-5_7},
  language = {Englisch},
  cr-category = {I.6 Simulation and Modeling,
    B.8.1 Reliability, Testing, and Fault-Tolerance},
  department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
  abstract = {Substantial modifications of both the choice of the grids, the combination
    coefficients, the parallel data structures and the algorithms used for the
    combination technique lead to numerical methods which are scalable. This is
    demonstrated by the provision of error and complexity bounds and in performance
    studies based on a state of the art code for the solution of the gyrokinetic
    equations of plasma physics. The key ideas for a new fault-tolerant combination
    technique are mentioned. New algorithms for both initial- and eigenvalue
    problems have been developed and are shown to have good performance.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-50&engl=0}
}
@inproceedings{INPROC-2016-45,
  author = {Steffen Hirschmann and Dirk Pfl{\"u}ger and Colin W. Glass},
  title = {{Towards Understanding Optimal Load-Balancing of Heterogeneous Short-Range Molecular Dynamics}},
  booktitle = {2016 IEEE 23rd International Conference on High Performance Computing Workshops (HiPCW)},
  publisher = {IEEE},
  institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
  pages = {130--141},
  type = {Konferenz-Beitrag},
  month = dec,
  year = {2016},
  doi = {10.1109/HiPCW.2016.027},
  language = {Englisch},
  cr-category = {G.1.6 Numerical Analysis Optimization},
  ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2016-45/INPROC-2016-45.pdf},
  department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-45&engl=0}
}
@inproceedings{INPROC-2016-43,
  author = {Arash Bakhtiari and Dhairya Malhotra and Amir Raoofy and Miriam Mehl and Hans-Joachim Bungartz and George Biros},
  title = {{A Parallel Arbitrary-order Accurate AMR Algorithm for the Scalar Advection-diffusion Equation}},
  booktitle = {Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis},
  address = {Piscataway, NJ, USA},
  publisher = {IEEE Press},
  institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
  pages = {1--12},
  type = {Konferenz-Beitrag},
  month = nov,
  year = {2016},
  isbn = {978-1-4673-8815-3},
  keywords = {adaptive mesh refinement, semi-Lagrangian, fast multipole, parallel computing},
  language = {Englisch},
  cr-category = {J.0 Computer Applications General},
  ee = {http://dl.acm.org/citation.cfm?id=3014904.3014963},
  department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
  abstract = {We present a numerical method for solving the scalar advection-diffusion
    equation using adaptive mesh refinement. Our solver has three unique
    characteristics: (1) it supports arbitrary-order accuracy in space; (2) it
    allows different discretizations for the velocity and scalar advected quantity;
    and (3) it combines the method of characteristics with an integral equation
    formulation. In particular, our solver is based on a second-order accurate,
    unconditionally stable, semi-Lagrangian scheme combined with a
    spatially-adaptive Chebyshev octree for discretization. We study the
    convergence, single-node performance, strong scaling, and weak scaling of our
    scheme for several challenging flows that cannot be resolved efficiently
    without using high-order accurate discretizations. For example, we consider
    problems for which switching from 4th order to 14th order approximation results
    in two orders of magnitude speedups for a fixed accuracy. For our largest run,
    we solve a problem with one billion unknowns on a tree with maximum depth equal
    to 10 and 14th-order elements on 16,384 cores on the STAMPEDE system at
    the Texas Advanced Computing Center.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-43&engl=0}
}
@inproceedings{INPROC-2016-20,
  author = {Michael Lahnert and Carsten Burstedde and Christian Holm and Miriam Mehl and Georg Rempfer and Florian Weik},
  title = {{Towards Lattice-Boltzmann on Dynamically Adaptive Grids -- Minimally-Invasive Grid Exchange in ESPResSo}},
  booktitle = {ECCOMAS Congress 2016, VII European Congress on Computational Methods in Applied Sciences and Engineering},
  editor = {M. Papadrakakis and V. Papadopoulos and G. Stefanou and V. Plevris},
  publisher = {ECCOMAS},
  institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
  pages = {1--25},
  type = {Konferenz-Beitrag},
  month = jun,
  year = {2016},
  language = {Englisch},
  cr-category = {G.1.8 Partial Differential Equations},
  ee = {https://www.eccomas2016.org/proceedings/pdf/4659.pdf},
  department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
  abstract = {We present the minimally-invasive exchange of the regular Cartesian grid in the
    lattice-Boltzmann solver of ESPResSo by a dynamically-adaptive octree grid.
    Octree grids are favoured by computer scientists over other grid types as they
    are very memory-efficient. In addition, they represent a natural generalisation
    of regular Cartesian grids, such that most discretisation details of a regular
    grid solver can be maintained. Optimised codes, however, require a special
    tree-oriented grid traversal, which typically conflicts with existing
    simulation codes using various iterators, some for only parts of the grid,
    e.g., boundaries. ESPResSo is a large software package developed for
    soft-matter simulations involving fluid flow, electrostatic, and electrokinetic
    effects, and molecular dynamics. The currently used regular Cartesian grid
    hinders the simulation of realistic domain sizes and significant time periods,
    a problem that can be solved using grid adaptivity. In a first step, we focus
    on the lattice-Boltzmann flow solver in ESPResSo. p4est is a grid framework,
    that already provides dynamically adaptive quadtree and octree grids together
    with high-level interfaces for flexible grid traversals with direct neighbour
    access in all grid components. In this paper, we first describe extensions of
    p4est that were necessary to fulfill certain application requirements. The
    second part of our work consists of the minimally-invasive changes in ESPResSo
    preserving the expertise accumulated in the software's implementation over
    years. Our numerical results demonstrate physical correctness of the
    implementation, good parallel scalability and low overhead of the dynamical
    grid adaptivity. These are prerequisites to actually profit from grid
    adaptivity in terms of being able to simulate larger domains over longer time
    periods with limited computational resources. Thus, the current status forms a
    solid basis for further steps such as the development of refinement criteria,
    the setup of more realistic application scenarios, and a GPU implementation.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-20&engl=0}
}
@inproceedings{INPROC-2016-12,
  author = {Julian Valentin and Dirk Pfl{\"u}ger},
  title = {{Hierarchical Gradient-Based Optimization with B-Splines on Sparse Grids}},
  booktitle = {Sparse Grids and Applications - Stuttgart 2014},
  editor = {Jochen Garcke and Dirk Pfl{\"u}ger},
  publisher = {Springer},
  institution = {Universit{\"a}t Stuttgart, Fakult{\"a}t Informatik, Elektrotechnik und Informationstechnik, Germany},
  series = {Lecture Notes in Computational Science and Engineering},
  volume = {109},
  pages = {315--336},
  type = {Konferenz-Beitrag},
  month = mar,
  year = {2016},
  doi = {10.1007/978-3-319-28262-6_13},
  keywords = {sparse grids; optimization; B-splines},
  language = {Englisch},
  cr-category = {G.1.6 Numerical Analysis Optimization},
  ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2016-12/INPROC-2016-12.pdf,
    https://dx.doi.org/10.1007/978-3-319-28262-6_13},
  department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
  abstract = {Optimization algorithms typically perform a series of function evaluations to
    find an approximation of an optimal point of the objective function.
    Evaluations can be expensive, e.g., if they depend on the results of a complex
    simulation. When dealing with higher-dimensional functions, the curse of
    dimensionality increases the difficulty of the problem rapidly and prohibits a
    regular sampling. Instead of directly optimizing the objective function, we
    replace it with a sparse grid interpolant, saving valuable function
    evaluations. We generalize the standard piecewise linear basis to hierarchical
    B-splines, making the sparse grid surrogate smooth enough to enable
    gradient-based optimization methods. Also, we use an uncommon refinement
    criterion due to Novak and Ritter to generate an appropriate sparse grid
    adaptively. Finally, we evaluate the new method for various artificial and
    real-world examples.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2016-12&engl=0}
}
@article{ART-2016-25,
  author = {R. Haelterman and A.E.J. Bogaers and K. Scheufele and B. Uekermann and M. Mehl},
  title = {{Improving the Performance of the Partitioned QN-ILS Procedure for Fluid-structure Interaction Problems}},
  journal = {Computers \& Structures},
  publisher = {Pergamon Press, Inc.},
  volume = {171},
  number = {C},
  pages = {9--17},
  type = {Artikel in Zeitschrift},
  month = jan,
  year = {2016},
  doi = {10.1016/j.compstruc.2016.04.001},
  issn = {0045-7949},
  keywords = {Filtering, Fluid-structure interaction, Least squares, Quasi-Newton method},
  language = {Englisch},
  cr-category = {G.4 Mathematical Software,
    G.1.6 Numerical Analysis Optimization},
  ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/ART-2016-25/ART-2016-25.pdf},
  department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
  abstract = {The Quasi-Newton Inverse Least Squares method has become a popular method to
    solve partitioned interaction problems. Its performance can be enhanced by
    using information from previous time-steps if care is taken of the possible
    ill-conditioning that results. To enhance the stability, filtering has been
    used. In this paper we show that a relatively minor modification to the
    filtering technique can substantially reduce the required number of iterations.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-25&engl=0}
}
@article{ART-2016-21,
  author = {R. Haelterman and A.E.J. Bogaers and K. Scheufele and B. Uekermann and M. Mehl},
  title = {{Improving the Performance of the Partitioned QN-ILS Procedure for Fluid-structure Interaction Problems}},
  journal = {Computers \& Structures},
  address = {Elmsford, NY, USA},
  publisher = {Pergamon Press, Inc.},
  volume = {171},
  number = {C},
  pages = {9--17},
  type = {Artikel in Zeitschrift},
  month = jul,
  year = {2016},
  doi = {10.1016/j.compstruc.2016.04.001},
  keywords = {fluid-structure interaction, quasi-Newton, filtering, partitioned},
  language = {Englisch},
  cr-category = {J.0 Computer Applications General},
  ee = {http://dl.acm.org/citation.cfm?id=2956318},
  department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
  abstract = {The Quasi-Newton Inverse Least Squares method has become a popular method to
    solve partitioned interaction problems. Its performance can be enhanced by
    using information from previous time-steps if care is taken of the possible
    ill-conditioning that results. To enhance the stability, filtering has been used.
    In this paper we show that a relatively minor modification to the filtering
    technique can substantially reduce the required number of iterations.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-21&engl=0}
}
@article{ART-2016-11,
  author = {R. Haelterman and A.E.J. Bogaers and K. Scheufele and B. Uekermann and M. Mehl},
  title = {{Improving the performance of the partitioned QN-ILS procedure for fluid--structure interaction problems: Filtering}},
  journal = {Computers \& Structures},
  publisher = {Elsevier},
  volume = {171},
  pages = {9--17},
  type = {Artikel in Zeitschrift},
  month = may,
  year = {2016},
  issn = {0045-7949},
  doi = {10.1016/j.compstruc.2016.04.001},
  keywords = {Fluid--structure interaction; Quasi-Newton method; Least squares; Filtering},
  language = {Englisch},
  cr-category = {G.1.3 Numerical Linear Algebra,
    G.1.8 Partial Differential Equations},
  ee = {http://www.sciencedirect.com/science/article/pii/S004579491630164X},
  contact = {miriam.mehl@ipvs.uni-stuttgart.de},
  department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
  abstract = {The Quasi-Newton Inverse Least Squares method has become a popular method to
    solve partitioned interaction problems. Its performance can be enhanced by
    using information from previous time-steps if care is taken of the possible
    ill-conditioning that results. To enhance the stability, filtering has been
    used. In this paper we show that a relatively minor modification to the
    filtering technique can substantially reduce the required number of iterations.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-11&engl=0}
}
@article{ART-2016-05,
  author = {R. Haelterman and A.E.J. Bogaers and K. Scheufele and B. Uekermann and M. Mehl},
  title = {{Improving the performance of the partitioned QN-ILS procedure for fluid--structure interaction problems: Filtering}},
  journal = {Computers \& Structures},
  publisher = {Elsevier},
  volume = {171},
  pages = {9--17},
  type = {Artikel in Zeitschrift},
  month = may,
  year = {2016},
  doi = {10.1016/j.compstruc.2016.04.001},
  keywords = {Fluid--structure interaction; Quasi-Newton method; Least squares; Filtering},
  language = {Englisch},
  cr-category = {J.2 Physical Sciences and Engineering},
  ee = {http://www.sciencedirect.com/science/article/pii/S004579491630164X},
  department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
  abstract = {The Quasi-Newton Inverse Least Squares method has become a popular method to
    solve partitioned interaction problems. Its performance can be enhanced by
    using information from previous time-steps if care is taken of the possible
    ill-conditioning that results. To enhance the stability, filtering has been
    used. In this paper we show that a relatively minor modification to the
    filtering technique can substantially reduce the required number of iterations.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-05&engl=0}
}
@article{ART-2016-04,
  author = {Patrick Diehl and Fabian Franzelin and Dirk Pfl{\"u}ger and Georg C. Ganzenm{\"u}ller},
  title = {{Bond-based peridynamics: a quantitative study of Mode I crack opening}},
  journal = {International Journal of Fracture},
  publisher = {Springer},
  pages = {1--14},
  type = {Artikel in Zeitschrift},
  month = may,
  year = {2016},
  issn = {1573-2673},
  doi = {10.1007/s10704-016-0119-5},
  keywords = {Bond-based peridynamics; EMU-ND; critical traction; sparse grids},
  language = {Englisch},
  cr-category = {I.6 Simulation and Modeling,
    G.1.8 Partial Differential Equations},
  department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
  abstract = {This paper shows a new approach to estimate the critical traction for Mode I
    crack opening before crack growth by numerical simulation. For quasi-static
    loading, Linear Elastic Fracture Mechanics predicts the critical traction
    before crack growth. To simulate the crack growth, we used bond-based
    peridynamics, a non-local generalization of continuum mechanics. We discretize
    the peridynamics equation of motion with a collocation by space approach, the
    so-called EMU nodal discretization. As the constitutive law, we employ the
    improved prototype micro brittle material model. This bond-based material model
    is verified by the Young's modulus from classical theory for a homogeneous
    deformation for different quadrature rules. For the EMU-ND we studied the
    behavior for different ratios of the horizon and nodal spacing to gain a robust
    value for a large variety of materials. To access this wide range of materials,
    we applied sparse grids, a technique to build high-dimensional surrogate
    models. Sparse grids significantly reduce the number of simulation runs
    compared to a full grid approach and keep up a similar approximation accuracy.
    For the validation of the quasi-static loading process, we show that the
    critical traction is independent of the material density for most material
    parameters. The bond-based IPMB model with EMU nodal discretization seems very
    robust for the ratio $\delta/\Delta X=3$ for a wide range of materials, if an
    error of 5\% is acceptable.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-04&engl=0}
}
@article{ART-2016-03,
  author = {Philipp Hupp and Mario Heene and Riko Jacob and Dirk Pfl{\"u}ger},
  title = {{Global communication schemes for the numerical solution of high-dimensional PDEs}},
  journal = {Parallel Computing},
  address = {Amsterdam, The Netherlands},
  publisher = {Elsevier Science Publishers},
  volume = {52},
  pages = {78--105},
  type = {Artikel in Zeitschrift},
  month = feb,
  year = {2016},
  issn = {0167-8191},
  keywords = {Communication model; Communication performance analysis; Experimental evaluation; Global communication; High-performance computing; Sparse grid combination technique},
  language = {Englisch},
  cr-category = {G.1.0 Numerical Analysis General,
    G.1.8 Partial Differential Equations,
    D.4 Operating Systems,
    D.1.3 Concurrent Programming,
    F.2.1 Numerical Algorithms and Problems},
  department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
  abstract = {We study the global communication of the numerical solution of high-dimensional
    PDEs. We design two optimal communication schemes for the sparse grid
    combination technique. We present a new communication model based on the
    system's latency and bandwidth. The communication model predicts the performance
    of the communication schemes. Experimental results on several current
    supercomputers confirm the predictions. The numerical treatment of
    high-dimensional partial differential equations is among the most
    compute-hungry problems and in urgent need for current and future
    high-performance computing (HPC) systems. It is thus also facing the grand
    challenges of exascale computing such as the requirement to reduce global
    communication. To cope with high dimensionalities we employ a hierarchical
    discretization scheme, the sparse grid combination technique. Based on an
    extrapolation scheme, the combination technique additionally mitigates the need
    for global communication: multiple and much smaller problems can be computed
    independently for each time step, and the global communication shrinks to a
    reduce/broadcast step in between. Here, we focus on this remaining
    synchronization step of the combination technique and present two communication
    schemes designed to either minimize the number of communication rounds or the
    total communication volume. Experiments on two different supercomputers show
    that either of the schemes outperforms the other depending on the size of the
    problem. Furthermore, we present a communication model based on the system's
    latency and bandwidth and validate the model with the experiments. The model
    can be used to predict the runtime of the reduce/broadcast step for
    dimensionalities that are yet out of scope on current supercomputers.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-03&engl=0}
}
@article{ART-2016-02,
  author = {Hans-Joachim Bungartz and Florian Lindner and Bernhard Gatzhammer and Miriam Mehl and Klaudius Scheufele and Alexander Shukaev and Benjamin Uekermann},
  title = {{preCICE -- A Fully Parallel Library for Multi-Physics Surface Coupling}},
  journal = {Computers \& Fluids},
  publisher = {Elsevier},
  pages = {1--1},
  type = {Artikel in Zeitschrift},
  month = jan,
  year = {2016},
  doi = {10.1016/j.compfluid.2016.04.003},
  issn = {0045-7930},
  keywords = {partitioned multi-physics; strong coupling; non-matching grids; inter-code communication; quasi-Newton; radial basis functions; high performance computing},
  language = {Englisch},
  cr-category = {G.1.0 Numerical Analysis General,
    D.0 Software General},
  ee = {http://www.sciencedirect.com/science/article/pii/S0045793016300974},
  department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
  abstract = {In the emerging field of multi-physics simulations, we often face the
    challenge to establish new connections between physical fields, to add
    additional aspects to existing models, or to exchange a solver for one of the
    involved physical fields. If in such cases a fast prototyping of a coupled
    simulation environment is required, a partitioned setup using existing codes
    for each physical field is the optimal choice. As accurate models require also
    accurate numerics, multi-physics simulations typically use very high grid
    resolutions and, accordingly, are run on massively parallel computers. Here, we
    face the challenge to combine flexibility with parallel scalability and
    hardware efficiency. In this paper, we present the coupling tool preCICE which
    offers the complete coupling functionality required for a fast development of a
    multi-physics environment using existing, possibly black-box solvers. We hereby
    restrict ourselves to bidirectional surface coupling which is too expensive to
    be done via file communication, but in contrast to volume coupling still a
    candidate for distributed memory parallelism between the involved solvers. The
    paper gives an overview of the numerical functionalities implemented in preCICE
    as well as the user interfaces, i.e., the application programming interface and
    configuration options. Our numerical examples and the list of different
    open-source and commercial codes that have already been used with preCICE in
    coupled simulations show the high flexibility, the correctness, and the high
    performance and parallel scalability of coupled simulations with preCICE as the
    coupling unit.},
  url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-02&engl=0}
}
@article {ART-2016-01,
author = {Miriam Mehl and Benjamin Uekermann and Hester Bijl and David Blom and Bernhard Gatzhammer and Alexander van Zuijlen},
title = {{Parallel coupling numerics for partitioned fluid--structure interaction simulations}},
journal = {Computers \& Mathematics with Applications},
publisher = {Elsevier},
volume = {71},
number = {4},
pages = {869--891},
type = {Artikel in Zeitschrift},
month = {Januar},
year = {2016},
issn = {0898-1221},
doi = {10.1016/j.camwa.2015.12.025},
keywords = {Fluid--structure interaction, partitioned simulation, parallel coupling methods, quasi-Newton, high performance computing},
language = {Englisch},
cr-category = {J.2 Physical Sciences and Engineering},
department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
abstract = {Within the last decade, very sophisticated numerical methods for the iterative
and partitioned solution of fluid-structure interaction problems have been
developed that allow for high accuracy and very complex scenarios. The
combination of these two aspects -- accuracy and complexity -- demands very
high computational grid resolutions and, thus, high performance computing
methods designed for massively parallel hardware architectures. For those
architectures, currently used coupling methods that mainly work with a staggered
execution of the fluid and structure solver, i.e., the execution of one solver
after the other in every outer iteration, lead to severe load imbalances: if
the flow solver, e.g., scales on a very large number of processors but the
structural solver does not due to its limited amount of data and required
operations, almost all processors assigned to the coupled simulations are idle
while the structure solver executes. We propose two new iterative coupling
methods that allow for simultaneous execution of flow and structure solvers. In
both cases, we show that pure fixed-point iterations based on the parallel
execution of the solvers do not lead to good results, but the combination of
parallel solver execution and so-called quasi-Newton methods yields very
efficient and robust methods. Those methods are known to be very efficient also
for the stabilization of critical scenarios solved with the standard staggered
solver execution. We demonstrate the competitive convergence of our methods
for various established benchmark scenarios. Both methods are perfectly suited
for use with black-box solvers because the quasi-Newton approach uses solely
in- and output information of the solvers to approximate the effect of the
unknown Jacobians that would be required in a standard Newton solver.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2016-01&engl=0}
}
@inbook {INBOOK-2016-07,
author = {Fabian Franzelin and Dirk Pfl{\"u}ger},
title = {{From Data to Uncertainty: An Efficient Integrated Data-Driven Sparse Grid Approach to Propagate Uncertainty}},
series = {Sparse Grids and Applications - Stuttgart 2014},
publisher = {Springer International Publishing},
pages = {29--49},
type = {Beitrag in Buch},
month = {Januar},
year = {2016},
doi = {10.1007/978-3-319-28262-6_2},
isbn = {978-3-319-28262-6},
keywords = {sparse grids, hierarchical basis, adaptive, stochastic collocation},
language = {Englisch},
cr-category = {G.1.1 Numerical Analysis Interpolation,
G.3 Probability and Statistics,
G.4 Mathematical Software},
contact = {Fabian Franzelin fabian.franzelin@ipvs.uni-stuttgart.de},
department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
abstract = {},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2016-07&engl=0}
}
@inbook {INBOOK-2016-06,
author = {Hans-Joachim Bungartz and Florian Lindner and Miriam Mehl and Klaudius Scheufele and Alexander Shukaev and Benjamin Uekermann},
title = {{Partitioned Fluid--Structure--Acoustics Interaction on Distributed Data: Coupling via preCICE}},
series = {Software for Exascale Computing - SPPEXA 2013-2015},
publisher = {Springer International Publishing},
pages = {239--266},
type = {Beitrag in Buch},
month = {Januar},
year = {2016},
isbn = {978-3-319-40528-5},
doi = {10.1007/978-3-319-40528-5_11},
keywords = {preCICE},
language = {Englisch},
cr-category = {D.0 Software General},
department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme},
abstract = {One of the great prospects of exascale computing is to simulate challenging
highly complex multi-physics scenarios with different length and time scales. A
modular approach re-using existing software for the single-physics model parts
has great advantages regarding flexibility and software development costs. At
the same time, it poses challenges in terms of numerical stability and parallel
scalability. The coupling library preCICE provides communication, data mapping,
and coupling numerics for surface-coupled multi-physics applications in a
highly modular way. We recapitulate the numerical methods but focus particularly
on their parallel implementation. The numerical results for an artificial
coupling interface show a very small runtime of the coupling compared to typical
solver runtimes and a good parallel scalability on a number of cores
corresponding to a massively parallel simulation for an actual, coupled
simulation. Further results for actual application scenarios from the field of
fluid-structure-acoustic interactions are presented in the next chapter.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2016-06&engl=0}
}
@book {BOOK-2016-01,
editor = {Jochen Garcke and Dirk Pfl{\"u}ger},
title = {{Sparse Grids and Applications - Stuttgart 2014}},
publisher = {Springer International Publishing},
series = {Lecture Notes in Computational Science and Engineering},
volume = {107},
pages = {336},
type = {Buch},
month = {Januar},
year = {2016},
language = {Englisch},
cr-category = {G.0 Mathematics of Computing General},
department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme, Simulation gro{\ss}er Systeme;
Universit{\"a}t Stuttgart, Institut f{\"u}r Parallele und Verteilte Systeme},
abstract = {Sparse grids are a popular approach for the numerical treatment of
high-dimensional problems. Where classical numerical discretization schemes
fail in more than three or four dimensions, sparse grids, in their different
flavors, are frequently the method of choice, be it spatially adaptive in the
hierarchical basis or via the dimensionally adaptive combination technique. The
third Workshop on Sparse Grids and Applications (SGA2014), which took place at
the University of Stuttgart from September 1 to 5 in 2014, demonstrated once
again the importance of this numerical discretization scheme. Organized by
Hans-Joachim Bungartz, Jochen Garcke, Michael Griebel, Markus Hegland, Dirk
Pfl{\"u}ger, and Clayton Webster, almost 60 participants from 8 different
countries have presented and discussed the current state of the art of sparse
grids and their applications. Thirty-eight talks covered their numerical
analysis as well as efficient data structures and new forms of adaptivity and a
range of applications from clustering and model order reduction to uncertainty
quantification settings and optimization. As a novelty, the topic
high-performance computing covered several talks, targeting exascale computing
and related tasks. Besides data structures and communication patterns with
excellent parallel scalability, fault tolerance was introduced to the SGA
series, the hierarchical approach providing novel approaches to the treatment
of hardware failures without checkpoint restart. This volume of LNCSE collects
selected contributions from attendees of the workshop.},
url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=BOOK-2016-01&engl=0}
}