dune-istl  2.6-git
fastamg.hh
1 // -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 // vi: set et ts=4 sw=2 sts=2:
3 #ifndef DUNE_ISTL_FASTAMG_HH
4 #define DUNE_ISTL_FASTAMG_HH
5 
6 #include <memory>
7 #include <dune/common/exceptions.hh>
8 #include <dune/common/typetraits.hh>
9 #include <dune/common/unused.hh>
10 #include <dune/istl/paamg/smoother.hh>
11 #include <dune/istl/paamg/transfer.hh>
12 #include <dune/istl/paamg/hierarchy.hh>
13 #include <dune/istl/solvers.hh>
14 #include <dune/istl/scalarproducts.hh>
15 #include <dune/istl/superlu.hh>
16 #include <dune/istl/umfpack.hh>
17 #include <dune/istl/solvertype.hh>
18 #include <dune/istl/io.hh>
19 #include <dune/istl/preconditioners.hh>
20 
21 #include "fastamgsmoother.hh"
22 
31 namespace Dune
32 {
33  namespace Amg
34  {
57  template<class M, class X, class PI=SequentialInformation, class A=std::allocator<X> >
58  class FastAMG : public Preconditioner<X,X>
59  {
60  public:
62  typedef M Operator;
 69  typedef PI ParallelInformation;
 71  typedef MatrixHierarchy<M, ParallelInformation, A> OperatorHierarchy;
 73  typedef typename OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy;
74 
76  typedef X Domain;
 78  typedef X Range;
 80  typedef InverseOperator<X,X> CoarseSolver;
 81 
 83  typedef SeqSSOR<typename M::matrix_type,X,X> Smoother;
89  FastAMG(const OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
90  const Parameters& parms,
91  bool symmetric=true);
92 
104  template<class C>
105  FastAMG(const Operator& fineOperator, const C& criterion,
106  const Parameters& parms=Parameters(),
 107  bool symmetric=true,
 108  const PI& pinfo=PI());
 109 
113  FastAMG(const FastAMG& amg);
114 
115  ~FastAMG();
116 
118  void pre(Domain& x, Range& b);
119 
121  void apply(Domain& v, const Range& d);
122 
 124  virtual SolverCategory::Category category() const
 125  {
 126  return SolverCategory::sequential;
 127  }
128 
130  void post(Domain& x);
131 
136  template<class A1>
137  void getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont);
138 
139  std::size_t levels();
140 
141  std::size_t maxlevels();
142 
 151  void recalculateHierarchy()
 152  {
153  matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
154  }
155 
160  bool usesDirectCoarseLevelSolver() const;
161 
162  private:
169  template<class C>
170  void createHierarchies(C& criterion, Operator& matrix,
171  const PI& pinfo);
172 
179  struct LevelContext
 180  {
 184  typename OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix;
 188  typename ParallelInformationHierarchy::Iterator pinfo;
 192  typename OperatorHierarchy::RedistributeInfoList::const_iterator redist;
 196  typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates;
 200  typename Hierarchy<Domain,A>::Iterator lhs;
 204  typename Hierarchy<Domain,A>::Iterator residual;
 208  typename Hierarchy<Range,A>::Iterator rhs;
 212  std::size_t level;
213  };
214 
216  void mgc(LevelContext& levelContext, Domain& x, const Range& b);
217 
224  void presmooth(LevelContext& levelContext, Domain& x, const Range& b);
225 
232  void postsmooth(LevelContext& levelContext, Domain& x, const Range& b);
233 
240  void moveToFineLevel(LevelContext& levelContext, bool processedFineLevel,
241  Domain& fineX);
242 
247  bool moveToCoarseLevel(LevelContext& levelContext);
248 
253  void initIteratorsWithFineLevel(LevelContext& levelContext);
254 
256  std::shared_ptr<OperatorHierarchy> matrices_;
258  std::shared_ptr<CoarseSolver> solver_;
260  Hierarchy<Range,A>* rhs_;
262  Hierarchy<Domain,A>* lhs_;
264  Hierarchy<Domain,A>* residual_;
 265 
 267  typedef Dune::ScalarProduct<X> ScalarProduct;
 269  std::shared_ptr<ScalarProduct> scalarProduct_;
271  std::size_t gamma_;
273  std::size_t preSteps_;
275  std::size_t postSteps_;
276  std::size_t level;
277  bool buildHierarchy_;
278  bool symmetric;
279  bool coarsesolverconverged;
281  typedef std::shared_ptr<Smoother> SmootherPointer;
282  SmootherPointer coarseSmoother_;
284  std::size_t verbosity_;
285  };
286 
287  template<class M, class X, class PI, class A>
 288  FastAMG<M,X,PI,A>::FastAMG(const FastAMG& amg)
 289  : matrices_(amg.matrices_), solver_(amg.solver_),
290  rhs_(), lhs_(), residual_(), scalarProduct_(amg.scalarProduct_),
291  gamma_(amg.gamma_), preSteps_(amg.preSteps_), postSteps_(amg.postSteps_),
292  symmetric(amg.symmetric), coarsesolverconverged(amg.coarsesolverconverged),
293  coarseSmoother_(amg.coarseSmoother_), verbosity_(amg.verbosity_)
294  {
295  if(amg.rhs_)
296  rhs_=new Hierarchy<Range,A>(*amg.rhs_);
297  if(amg.lhs_)
298  lhs_=new Hierarchy<Domain,A>(*amg.lhs_);
299  if(amg.residual_)
300  residual_=new Hierarchy<Domain,A>(*amg.residual_);
301  }
302 
303  template<class M, class X, class PI, class A>
 304  FastAMG<M,X,PI,A>::FastAMG(const OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
 305  const Parameters& parms, bool symmetric_)
306  : matrices_(&matrices), solver_(&coarseSolver),
307  rhs_(), lhs_(), residual_(), scalarProduct_(),
308  gamma_(parms.getGamma()), preSteps_(parms.getNoPreSmoothSteps()),
309  postSteps_(parms.getNoPostSmoothSteps()), buildHierarchy_(false),
310  symmetric(symmetric_), coarsesolverconverged(true),
311  coarseSmoother_(), verbosity_(parms.debugLevel())
312  {
313  if(preSteps_>1||postSteps_>1)
314  {
 315  std::cerr<<"WARNING: only one step of smoothing is supported!"<<std::endl;
 316  preSteps_=postSteps_=1;
317  }
318  assert(matrices_->isBuilt());
319  static_assert(std::is_same<PI,SequentialInformation>::value,
320  "Currently only sequential runs are supported");
321  }
322  template<class M, class X, class PI, class A>
323  template<class C>
 324  FastAMG<M,X,PI,A>::FastAMG(const Operator& matrix,
 325  const C& criterion,
326  const Parameters& parms,
327  bool symmetric_,
328  const PI& pinfo)
329  : solver_(), rhs_(), lhs_(), residual_(), scalarProduct_(), gamma_(parms.getGamma()),
330  preSteps_(parms.getNoPreSmoothSteps()), postSteps_(parms.getNoPostSmoothSteps()),
331  buildHierarchy_(true),
332  symmetric(symmetric_), coarsesolverconverged(true),
333  coarseSmoother_(), verbosity_(criterion.debugLevel())
334  {
335  if(preSteps_>1||postSteps_>1)
336  {
 337  std::cerr<<"WARNING: only one step of smoothing is supported!"<<std::endl;
338  preSteps_=postSteps_=1;
339  }
340  static_assert(std::is_same<PI,SequentialInformation>::value,
341  "Currently only sequential runs are supported");
342  // TODO: reestablish compile time checks.
343  //static_assert(static_cast<int>(PI::category)==static_cast<int>(S::category),
344  // "Matrix and Solver must match in terms of category!");
345  createHierarchies(criterion, const_cast<Operator&>(matrix), pinfo);
346  }
347 
348  template<class M, class X, class PI, class A>
 349  FastAMG<M,X,PI,A>::~FastAMG()
 350  {
351  if(buildHierarchy_) {
352  if(solver_)
353  solver_.reset();
354  if(coarseSmoother_)
355  coarseSmoother_.reset();
356  }
357  if(lhs_)
358  delete lhs_;
359  lhs_=nullptr;
360  if(residual_)
361  delete residual_;
362  residual_=nullptr;
363  if(rhs_)
364  delete rhs_;
365  rhs_=nullptr;
366  }
367 
368  template<class M, class X, class PI, class A>
369  template<class C>
370  void FastAMG<M,X,PI,A>::createHierarchies(C& criterion, Operator& matrix,
371  const PI& pinfo)
372  {
373  Timer watch;
374  matrices_.reset(new OperatorHierarchy(matrix, pinfo));
375 
376  matrices_->template build<NegateSet<typename PI::OwnerSet> >(criterion);
377 
378  if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
379  std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()<<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;
380 
381  if(buildHierarchy_ && matrices_->levels()==matrices_->maxlevels()) {
 382  // We have reached the coarsest level. Create the coarse solver.
383  typedef typename SmootherTraits<Smoother>::Arguments SmootherArgs;
384  SmootherArgs sargs;
385  sargs.iterations = 1;
386 
 387  typename ConstructionTraits<Smoother>::Arguments cargs;
 388  cargs.setArgs(sargs);
389  if(matrices_->redistributeInformation().back().isSetup()) {
390  // Solve on the redistributed partitioning
391  cargs.setMatrix(matrices_->matrices().coarsest().getRedistributed().getmat());
392  cargs.setComm(matrices_->parallelInformation().coarsest().getRedistributed());
393  }else{
394  cargs.setMatrix(matrices_->matrices().coarsest()->getmat());
395  cargs.setComm(*matrices_->parallelInformation().coarsest());
396  }
397 
398  coarseSmoother_.reset(ConstructionTraits<Smoother>::construct(cargs));
399  scalarProduct_ = createScalarProduct<X>(cargs.getComm(),category());
400 
401 #if HAVE_SUPERLU || HAVE_SUITESPARSE_UMFPACK
402 #if HAVE_SUITESPARSE_UMFPACK
403 #define DIRECTSOLVER UMFPack
404 #else
405 #define DIRECTSOLVER SuperLU
406 #endif
 407  // Use a direct solver (UMFPack or SuperLU) if we are purely sequential or have only one process on the coarsest level.
408  if(std::is_same<ParallelInformation,SequentialInformation>::value // sequential mode
409  || matrices_->parallelInformation().coarsest()->communicator().size()==1 //parallel mode and only one processor
410  || (matrices_->parallelInformation().coarsest().isRedistributed()
411  && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()==1
412  && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()>0)) { // redistribute and 1 proc
413  if(verbosity_>0 && matrices_->parallelInformation().coarsest()->communicator().rank()==0)
 414  std::cout<<"Using a direct coarse level solver"<<std::endl;
415  if(matrices_->parallelInformation().coarsest().isRedistributed())
416  {
417  if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
418  // We are still participating on this level
419  solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest().getRedistributed().getmat(), false, false));
420  else
421  solver_.reset();
422  }else
423  solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest()->getmat(), false, false));
424  }else
425 #undef DIRECTSOLVER
426 #endif // HAVE_SUPERLU || HAVE_SUITESPARSE_UMFPACK
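 // If no direct solver can be used (not compiled in, or more than one process remains on the
 // coarsest level), fall back to a BiCGSTAB iteration preconditioned with the coarse level smoother.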
427  {
428  if(matrices_->parallelInformation().coarsest().isRedistributed())
429  {
430  if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
431  // We are still participating on this level
432  solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(matrices_->matrices().coarsest().getRedistributed()),
433  *scalarProduct_,
434  *coarseSmoother_, 1E-2, 1000, 0));
435  else
436  solver_.reset();
437  }else
438  solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(*matrices_->matrices().coarsest()),
439  *scalarProduct_,
440  *coarseSmoother_, 1E-2, 1000, 0));
441  }
442  }
443 
444  if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
 445  std::cout<<"Building hierarchy of "<<matrices_->maxlevels()<<" levels (including coarse solver setup) took "<<watch.elapsed()<<" seconds."<<std::endl;
446  }
447 
448 
449  template<class M, class X, class PI, class A>
 450  void FastAMG<M,X,PI,A>::pre(Domain& x, Range& b)
 451  {
452  Timer watch, watch1;
 453  // Detect matrix rows where all off-diagonal entries are
 454  // zero and set x such that A_dd * x_d = b_d.
 455  // This allows users to be less careful when setting up
 456  // their linear systems.
457  typedef typename M::matrix_type Matrix;
458  typedef typename Matrix::ConstRowIterator RowIter;
459  typedef typename Matrix::ConstColIterator ColIter;
460  typedef typename Matrix::block_type Block;
461  Block zero;
462  zero=typename Matrix::field_type();
463 
464  const Matrix& mat=matrices_->matrices().finest()->getmat();
465  for(RowIter row=mat.begin(); row!=mat.end(); ++row) {
466  bool isDirichlet = true;
467  bool hasDiagonal = false;
468  ColIter diag;
469  for(ColIter col=row->begin(); col!=row->end(); ++col) {
470  if(row.index()==col.index()) {
471  diag = col;
472  hasDiagonal = (*col != zero);
473  }else{
474  if(*col!=zero)
475  isDirichlet = false;
476  }
477  }
478  if(isDirichlet && hasDiagonal)
479  diag->solve(x[row.index()], b[row.index()]);
480  }
481  if (verbosity_>0)
482  std::cout<<" Preprocessing Dirichlet took "<<watch1.elapsed()<<std::endl;
483  watch1.reset();
484  // No smoother to make x consistent! Do it by hand
485  matrices_->parallelInformation().coarsest()->copyOwnerToAll(x,x);
486  Range* copy = new Range(b);
487  if(rhs_)
488  delete rhs_;
489  rhs_ = new Hierarchy<Range,A>(copy);
490  Domain* dcopy = new Domain(x);
491  if(lhs_)
492  delete lhs_;
493  lhs_ = new Hierarchy<Domain,A>(dcopy);
494  dcopy = new Domain(x);
495  residual_ = new Hierarchy<Domain,A>(dcopy);
496  matrices_->coarsenVector(*rhs_);
497  matrices_->coarsenVector(*lhs_);
498  matrices_->coarsenVector(*residual_);
499 
500  // The preconditioner might change x and b. So we have to
501  // copy the changes to the original vectors.
502  x = *lhs_->finest();
503  b = *rhs_->finest();
504  }
505  template<class M, class X, class PI, class A>
 506  std::size_t FastAMG<M,X,PI,A>::levels()
 507  {
508  return matrices_->levels();
509  }
510  template<class M, class X, class PI, class A>
 511  std::size_t FastAMG<M,X,PI,A>::maxlevels()
 512  {
513  return matrices_->maxlevels();
514  }
515 
517  template<class M, class X, class PI, class A>
 518  void FastAMG<M,X,PI,A>::apply(Domain& v, const Range& d)
 519  {
520  LevelContext levelContext;
521  // Init all iterators for the current level
522  initIteratorsWithFineLevel(levelContext);
523 
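 // The cycle computes a correction for the defect d, so the update v must be zero on entry (asserted below).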
524  assert(v.two_norm()==0);
525 
526  level=0;
527  if(matrices_->maxlevels()==1){
528  // The coarse solver might modify the d!
529  Range b(d);
530  mgc(levelContext, v, b);
531  }else
532  mgc(levelContext, v, d);
533  if(postSteps_==0||matrices_->maxlevels()==1)
534  levelContext.pinfo->copyOwnerToAll(v, v);
535  }
536 
537  template<class M, class X, class PI, class A>
538  void FastAMG<M,X,PI,A>::initIteratorsWithFineLevel(LevelContext& levelContext)
539  {
540  levelContext.matrix = matrices_->matrices().finest();
541  levelContext.pinfo = matrices_->parallelInformation().finest();
542  levelContext.redist =
543  matrices_->redistributeInformation().begin();
544  levelContext.aggregates = matrices_->aggregatesMaps().begin();
545  levelContext.lhs = lhs_->finest();
546  levelContext.residual = residual_->finest();
547  levelContext.rhs = rhs_->finest();
548  levelContext.level=0;
549  }
550 
551  template<class M, class X, class PI, class A>
552  bool FastAMG<M,X,PI,A>
553  ::moveToCoarseLevel(LevelContext& levelContext)
554  {
555  bool processNextLevel=true;
556 
557  if(levelContext.redist->isSetup()) {
 558  DUNE_THROW(NotImplemented, "Redistribution of the coarse system is not implemented for FastAMG");
559  levelContext.redist->redistribute(static_cast<const Range&>(*levelContext.residual),
560  levelContext.residual.getRedistributed());
561  processNextLevel = levelContext.residual.getRedistributed().size()>0;
562  if(processNextLevel) {
563  //restrict defect to coarse level right hand side.
564  ++levelContext.pinfo;
 565  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
 566  ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
567  static_cast<const Range&>(levelContext.residual.getRedistributed()),
568  *levelContext.pinfo);
569  }
570  }else{
571  //restrict defect to coarse level right hand side.
572  ++levelContext.rhs;
573  ++levelContext.pinfo;
 574  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
 575  ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
576  static_cast<const Range&>(*levelContext.residual), *levelContext.pinfo);
577  }
578 
579  if(processNextLevel) {
580  // prepare coarse system
581  ++levelContext.residual;
582  ++levelContext.lhs;
583  ++levelContext.matrix;
584  ++levelContext.level;
585  ++levelContext.redist;
586 
587  if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
588  // next level is not the globally coarsest one
589  ++levelContext.aggregates;
590  }
591  // prepare the lhs on the next level
592  *levelContext.lhs=0;
593  *levelContext.residual=0;
594  }
595  return processNextLevel;
596  }
597 
598  template<class M, class X, class PI, class A>
599  void FastAMG<M,X,PI,A>
600  ::moveToFineLevel(LevelContext& levelContext, bool processNextLevel, Domain& x)
601  {
602  if(processNextLevel) {
603  if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
604  // previous level is not the globally coarsest one
605  --levelContext.aggregates;
606  }
607  --levelContext.redist;
608  --levelContext.level;
609  //prolongate and add the correction (update is in coarse left hand side)
610  --levelContext.matrix;
611  --levelContext.residual;
612 
613  }
614 
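 // Post-decrement: coarseLhs keeps pointing at the coarse level correction while levelContext.lhs moves to the finer level.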
615  typename Hierarchy<Domain,A>::Iterator coarseLhs = levelContext.lhs--;
616  if(levelContext.redist->isSetup()) {
617 
 618  // Need to redistribute during prolongation
 619  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
 620  ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
621  levelContext.lhs.getRedistributed(),
622  matrices_->getProlongationDampingFactor(),
623  *levelContext.pinfo, *levelContext.redist);
624  }else{
 625  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
 626  ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
627  matrices_->getProlongationDampingFactor(), *levelContext.pinfo);
628 
629  // printvector(std::cout, *lhs, "prolongated coarse grid correction", "lhs", 10, 10, 10);
630  }
631 
632 
633  if(processNextLevel) {
634  --levelContext.rhs;
635  }
636 
637  }
638 
639 
640  template<class M, class X, class PI, class A>
641  void FastAMG<M,X,PI,A>
642  ::presmooth(LevelContext& levelContext, Domain& x, const Range& b)
 643  {
 644  GaussSeidelPresmoothDefect<M::matrix_type::blocklevel>::apply(levelContext.matrix->getmat(),
645  x,
646  *levelContext.residual,
647  b);
648  }
649 
650  template<class M, class X, class PI, class A>
651  void FastAMG<M,X,PI,A>
652  ::postsmooth(LevelContext& levelContext, Domain& x, const Range& b)
653  {
 654  GaussSeidelPostsmoothDefect<M::matrix_type::blocklevel>
 655  ::apply(levelContext.matrix->getmat(), x, *levelContext.residual, b);
656  }
657 
658 
659  template<class M, class X, class PI, class A>
 660  bool FastAMG<M,X,PI,A>::usesDirectCoarseLevelSolver() const
 661  {
 662  return IsDirectSolver<CoarseSolver>::value;
 663  }
664 
665  template<class M, class X, class PI, class A>
666  void FastAMG<M,X,PI,A>::mgc(LevelContext& levelContext, Domain& v, const Range& b){
667 
668  if(levelContext.matrix == matrices_->matrices().coarsest() && levels()==maxlevels()) {
669  // Solve directly
 670  InverseOperatorResult res;
 671  res.converged=true; // If we do not compute, this flag will not get updated.
672  if(levelContext.redist->isSetup()) {
673  levelContext.redist->redistribute(b, levelContext.rhs.getRedistributed());
674  if(levelContext.rhs.getRedistributed().size()>0) {
675  // We are still participating in the computation
676  levelContext.pinfo.getRedistributed().copyOwnerToAll(levelContext.rhs.getRedistributed(),
677  levelContext.rhs.getRedistributed());
678  solver_->apply(levelContext.lhs.getRedistributed(), levelContext.rhs.getRedistributed(), res);
679  }
680  levelContext.redist->redistributeBackward(v, levelContext.lhs.getRedistributed());
681  levelContext.pinfo->copyOwnerToAll(v, v);
682  }else{
683  levelContext.pinfo->copyOwnerToAll(b, b);
684  solver_->apply(v, const_cast<Range&>(b), res);
685  }
686 
687  // printvector(std::cout, *lhs, "coarse level update", "u", 10, 10, 10);
688  // printvector(std::cout, *rhs, "coarse level rhs", "rhs", 10, 10, 10);
689  if (!res.converged)
690  coarsesolverconverged = false;
691  }else{
692  // presmoothing
693  presmooth(levelContext, v, b);
694  // printvector(std::cout, *lhs, "update", "u", 10, 10, 10);
695  // printvector(std::cout, *residual, "post presmooth residual", "r", 10);
696 #ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
697  bool processNextLevel = moveToCoarseLevel(levelContext);
698 
699  if(processNextLevel) {
700  // next level
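 // gamma_==1 yields a V-cycle, gamma_==2 a W-cycle on the coarser levels.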
701  for(std::size_t i=0; i<gamma_; i++)
702  mgc(levelContext, *levelContext.lhs, *levelContext.rhs);
703  }
704 
705  moveToFineLevel(levelContext, processNextLevel, v);
706 #else
 707  *levelContext.lhs=0;
708 #endif
709 
710  if(levelContext.matrix == matrices_->matrices().finest()) {
711  coarsesolverconverged = matrices_->parallelInformation().finest()->communicator().prod(coarsesolverconverged);
712  if(!coarsesolverconverged)
713  DUNE_THROW(MathError, "Coarse solver did not converge");
714  }
715 
716  // printvector(std::cout, *lhs, "update corrected", "u", 10, 10, 10);
717  // postsmoothing
718  postsmooth(levelContext, v, b);
719  // printvector(std::cout, *lhs, "update postsmoothed", "u", 10, 10, 10);
720 
721  }
722  }
723 
724 
726  template<class M, class X, class PI, class A>
 727  void FastAMG<M,X,PI,A>::post(Domain& x)
 728  {
729  DUNE_UNUSED_PARAMETER(x);
730  delete lhs_;
731  lhs_=nullptr;
732  delete rhs_;
733  rhs_=nullptr;
734  delete residual_;
735  residual_=nullptr;
736  }
737 
738  template<class M, class X, class PI, class A>
739  template<class A1>
740  void FastAMG<M,X,PI,A>::getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont)
741  {
742  matrices_->getCoarsestAggregatesOnFinest(cont);
743  }
744 
745  } // end namespace Amg
746 } // end namespace Dune
747 
748 #endif
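A minimal usage sketch, not taken from the header itself: it shows one way FastAMG might be set up as the preconditioner of a Krylov solver for an already assembled scalar matrix. The names mat, x, b, the function solve and the criterion parameters (15, 2000) are illustrative placeholders.

#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/bvector.hh>
#include <dune/istl/operators.hh>
#include <dune/istl/solvers.hh>
#include <dune/istl/paamg/fastamg.hh>

typedef Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1> > Matrix;
typedef Dune::BlockVector<Dune::FieldVector<double,1> > Vector;
typedef Dune::MatrixAdapter<Matrix,Vector,Vector> Operator;

void solve(const Matrix& mat, Vector& x, const Vector& b)
{
  Operator fineOperator(mat);

  // Aggregation criterion for a symmetric problem; the numbers are illustrative.
  typedef Dune::Amg::CoarsenCriterion<
      Dune::Amg::SymmetricCriterion<Matrix,Dune::Amg::FirstDiagonal> > Criterion;
  Criterion criterion(15, 2000); // max level, coarsen target

  Dune::Amg::Parameters parms;
  Dune::Amg::FastAMG<Operator,Vector> amg(fineOperator, criterion, parms);

  // Use the AMG cycle as preconditioner of a conjugate gradient solver.
  Dune::CGSolver<Vector> cg(fineOperator, amg, 1e-8, 100, 2);
  Dune::InverseOperatorResult res;
  Vector rhs(b); // the solver overwrites the right hand side
  cg.apply(x, rhs, res);
}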
Dune::Amg::GaussSeidelPostsmoothDefect::apply
static void apply(const M &A, X &x, Y &d, const Y &b)
Definition: fastamgsmoother.hh:53
umfpack.hh
Classes for using UMFPack with ISTL matrices.
Dune::SolverCategory::Category
Category
Definition: solvercategory.hh:21
Dune::Amg::FastAMG::ParallelInformation
PI ParallelInformation
The type of the parallel information. Either OwnerOverlapCopyCommunication or another type describing the...
Definition: fastamg.hh:69
Dune::SolverCategory::sequential
@ sequential
Category for sequential solvers.
Definition: solvercategory.hh:23
Dune::Amg::FastAMG::OperatorHierarchy
MatrixHierarchy< M, ParallelInformation, A > OperatorHierarchy
The operator hierarchy type.
Definition: fastamg.hh:71
Dune::Amg::SmootherTraits
Traits class for getting the attribute class of a smoother.
Definition: smoother.hh:63
Dune::Amg::FastAMG::LevelContext::redist
OperatorHierarchy::RedistributeInfoList::const_iterator redist
The iterator over the redistribution information.
Definition: fastamg.hh:192
transfer.hh
Prolongation and restriction for amg.
Dune::Amg::FastAMG::CoarseSolver
InverseOperator< X, X > CoarseSolver
the type of the coarse solver.
Definition: fastamg.hh:80
Dune::Amg::FastAMG::maxlevels
std::size_t maxlevels()
Definition: fastamg.hh:511
Dune::Amg::FastAMG::~FastAMG
~FastAMG()
Definition: fastamg.hh:349
fastamgsmoother.hh
Dune::Amg::FastAMG::LevelContext::level
std::size_t level
The level index.
Definition: fastamg.hh:212
Dune::Amg::FastAMG::LevelContext::matrix
OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix
The iterator over the matrices.
Definition: fastamg.hh:184
Dune::Amg::ConstructionTraits
Traits class for generically constructing non default constructable types.
Definition: novlpschwarz.hh:247
Dune::Amg::Transfer::prolongateVector
static void prolongateVector(const AggregatesMap< Vertex > &aggregates, Vector &coarse, Vector &fine, Vector &fineRedist, T1 damp, R &redistributor=R())
Dune::Matrix
A generic dynamic dense matrix.
Definition: matrix.hh:554
Dune::InverseOperator< X, X >
Dune::InverseOperatorResult::converged
bool converged
True if convergence criterion has been met.
Definition: solver.hh:65
Dune::Amg::FastAMG::post
void post(Domain &x)
Clean up.
Definition: fastamg.hh:727
hierarchy.hh
Provides a classes representing the hierarchies in AMG.
scalarproducts.hh
Define base class for scalar product and norm.
Dune::Amg::FastAMG::apply
void apply(Domain &v, const Range &d)
Apply one step of the preconditioner to the system A(v)=d.
Definition: fastamg.hh:518
Dune::Amg::FastAMG::FastAMG
FastAMG(const OperatorHierarchy &matrices, CoarseSolver &coarseSolver, const Parameters &parms, bool symmetric=true)
Construct a new amg with a specific coarse solver.
Definition: fastamg.hh:304
Dune::Amg::FastAMG::Operator
M Operator
The matrix operator type.
Definition: fastamg.hh:62
Dune::MatrixImp::DenseMatrixBase::ConstIterator
ConstIterator class for sequential access.
Definition: matrix.hh:397
Dune::InverseOperatorResult
Statistics about the application of an inverse operator.
Definition: solver.hh:40
Dune::Amg::Hierarchy
A hierarchy of containers (e.g. matrices or vectors)
Definition: hierarchy.hh:66
Dune::Matrix::field_type
T::field_type field_type
Export the type representing the underlying field.
Definition: matrix.hh:559
Dune::Amg::FastAMG::category
virtual SolverCategory::Category category() const
Category of the preconditioner (see SolverCategory::Category)
Definition: fastamg.hh:124
smoother.hh
Classes for the generic construction and application of the smoothers.
Dune::Amg::FastAMG::usesDirectCoarseLevelSolver
bool usesDirectCoarseLevelSolver() const
Check whether the coarse solver used is a direct solver.
Definition: fastamg.hh:660
Dune::Amg::presmooth
void presmooth(LevelContext &levelContext, size_t steps)
Apply pre smoothing on the current level.
Definition: smoother.hh:454
Dune::Amg::FastAMG::Range
X Range
The range type.
Definition: fastamg.hh:78
preconditioners.hh
Define general preconditioner interface.
Dune::ScalarProduct
Base class for scalar product and norm computation.
Definition: scalarproducts.hh:46
Dune::Amg::FastAMG::levels
std::size_t levels()
Definition: fastamg.hh:506
Dune::Matrix::ConstColIterator
row_type::const_iterator ConstColIterator
Const iterator for the entries of each row.
Definition: matrix.hh:583
Dune::Amg::Hierarchy< ParallelInformation, Allocator >::Iterator
LevelIterator< Hierarchy< ParallelInformation, Allocator >, ParallelInformation > Iterator
Type of the mutable iterator.
Definition: hierarchy.hh:250
Dune::Matrix::block_type
T block_type
Export the type representing the components.
Definition: matrix.hh:562
Dune::IsDirectSolver
Definition: solvertype.hh:13
solvers.hh
Implementations of the inverse operator interface.
Dune::Amg::Hierarchy< MatrixOperator, Allocator >::ConstIterator
LevelIterator< const Hierarchy< MatrixOperator, Allocator >, const MatrixOperator > ConstIterator
Type of the const iterator.
Definition: hierarchy.hh:253
Dune::Amg::FastAMG::pre
void pre(Domain &x, Range &b)
Prepare the preconditioner.
Definition: fastamg.hh:450
Dune::Amg::FastAMG::LevelContext::pinfo
ParallelInformationHierarchy::Iterator pinfo
The iterator over the parallel information.
Definition: fastamg.hh:188
io.hh
Some generic functions for pretty printing vectors and matrices.
Dune::Amg::FastAMG::LevelContext::lhs
Hierarchy< Domain, A >::Iterator lhs
The iterator over the left hand side.
Definition: fastamg.hh:200
Dune::Amg::FastAMG::getCoarsestAggregateNumbers
void getCoarsestAggregateNumbers(std::vector< std::size_t, A1 > &cont)
Get the aggregate number of each unknown on the coarsest level.
Definition: fastamg.hh:740
Dune::Amg::FastAMG::ParallelInformationHierarchy
OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy
The parallel data distribution hierarchy type.
Definition: fastamg.hh:73
Dune::Amg::FastAMG::LevelContext::residual
Hierarchy< Domain, A >::Iterator residual
The iterator over the residuals.
Definition: fastamg.hh:204
Dune::Amg::FastAMG
A fast (sequential) algebraic multigrid based on agglomeration that saves memory bandwidth.
Definition: fastamg.hh:58
Dune::Amg::Parameters
All parameters for AMG.
Definition: parameters.hh:390
superlu.hh
Classes for using SuperLU with ISTL matrices.
Dune
Definition: allocator.hh:7
Dune::Amg::postsmooth
void postsmooth(LevelContext &levelContext, size_t steps)
Apply post smoothing on the current level.
Definition: smoother.hh:476
solvertype.hh
Templates characterizing the type of a solver.
Dune::Amg::Transfer::restrictVector
static void restrictVector(const AggregatesMap< Vertex > &aggregates, Vector &coarse, const Vector &fine, T &comm)
Dune::Amg::GaussSeidelPresmoothDefect::apply
static void apply(const M &A, X &x, Y &d, const Y &b)
Definition: fastamgsmoother.hh:17
Dune::Amg::FastAMG::LevelContext::aggregates
OperatorHierarchy::AggregatesMapList::const_iterator aggregates
The iterator over the aggregates maps.
Definition: fastamg.hh:196
Dune::SeqSSOR
Sequential SSOR preconditioner.
Definition: preconditioners.hh:135
Dune::Preconditioner
Base class for matrix free definition of preconditioners.
Definition: preconditioner.hh:30
Dune::Amg::FastAMG::Domain
X Domain
The domain type.
Definition: fastamg.hh:76
Dune::Amg::FastAMG::LevelContext::rhs
Hierarchy< Range, A >::Iterator rhs
The iterator over the right hand side.
Definition: fastamg.hh:208
Dune::Amg::ConstructionTraits::construct
static T * construct(Arguments &args)
Construct an object with the specified arguments.
Definition: construction.hh:52
Dune::Amg::MatrixHierarchy
The hierarchies build by the coarsening process.
Definition: hierarchy.hh:309
Dune::Amg::FastAMG::recalculateHierarchy
void recalculateHierarchy()
Recalculate the matrix hierarchy.
Definition: fastamg.hh:151