dune-pdelab  2.4-dev
parallelhelper.hh
1 // -*- tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 // vi: set et ts=8 sw=2 sts=2:
3 #ifndef DUNE_PDELAB_BACKEND_ISTL_PARALLELHELPER_HH
4 #define DUNE_PDELAB_BACKEND_ISTL_PARALLELHELPER_HH
5 
6 #include <limits>
7 
8 #include <dune/common/deprecated.hh>
9 #include <dune/common/parallel/mpihelper.hh>
10 #include <dune/common/stdstreams.hh>
11 
12 #include <dune/istl/owneroverlapcopy.hh>
13 #include <dune/istl/solvercategory.hh>
14 #include <dune/istl/operators.hh>
15 #include <dune/istl/solvers.hh>
16 #include <dune/istl/preconditioners.hh>
17 #include <dune/istl/scalarproducts.hh>
18 #include <dune/istl/paamg/amg.hh>
19 #include <dune/istl/paamg/pinfo.hh>
20 #include <dune/istl/io.hh>
21 #include <dune/istl/superlu.hh>
22 
23 #include <dune/pdelab/backend/interface.hh>
24 #include <dune/pdelab/backend/istl/tags.hh>
25 #include <dune/pdelab/gridfunctionspace/genericdatahandle.hh>
26 #include <dune/pdelab/gridfunctionspace/tags.hh>
29 
30 namespace Dune {
31  namespace PDELab {
32  namespace istl {
33 
37 
38 
39  //========================================================
40  // A parallel helper class providing a nonoverlapping
41  // decomposition of all degrees of freedom
42  //========================================================
43 
44  template<typename GFS>
45  class ParallelHelper
46  {
47 
49  typedef int RankIndex;
50 
52  using RankVector = Dune::PDELab::Backend::Vector<GFS,RankIndex>;
54  using GhostVector = Dune::PDELab::Backend::Vector<GFS,bool>;
55 
57  typedef typename GFS::Ordering::Traits::ContainerIndex ContainerIndex;
58 
59  public:
60 
61  ParallelHelper (const GFS& gfs, int verbose = 1)
62  : _gfs(gfs)
63  , _rank(gfs.gridView().comm().rank())
64  , _ranks(gfs,_rank)
65  , _ghosts(gfs,false)
66  , _verbose(verbose)
67  {
68 
69  // Let's try to be clever and reduce the communication overhead by picking the smallest
70  // possible communication interface depending on the overlap structure of the GFS.
71  if (gfs.ordering().containedPartitions() == NonOverlappingPartitionSelector::partition_mask)
72  {
73  // The GFS only spans the interior and border partitions, so we can skip sending or
74  // receiving anything else.
75  _interiorBorder_all_interface = InteriorBorder_InteriorBorder_Interface;
76  _all_all_interface = InteriorBorder_InteriorBorder_Interface;
77  }
78  else
79  {
80  // In general, we have to transmit more.
81  _interiorBorder_all_interface = InteriorBorder_All_Interface;
82  _all_all_interface = All_All_Interface;
83  }
84 
85  if (_gfs.gridView().comm().size()>1)
86  {
87 
88  // find out about ghosts
89  //GFSDataHandle<GFS,GhostVector,GhostGatherScatter>
91  GhostDataHandle<GFS,GhostVector> gdh(_gfs,_ghosts,false);
92  _gfs.gridView().communicate(gdh,_interiorBorder_all_interface,Dune::ForwardCommunication);
93 
94  // create disjoint DOF partitioning
95  // GFSDataHandle<GFS,RankVector,DisjointPartitioningGatherScatter<RankIndex> >
96  // ibdh(_gfs,_ranks,DisjointPartitioningGatherScatter<RankIndex>(_rank));
97  DisjointPartitioningDataHandle<GFS,RankVector> pdh(_gfs,_ranks);
98  _gfs.gridView().communicate(pdh,_interiorBorder_all_interface,Dune::ForwardCommunication);
99 
100  }
101 
102  }
103 
104  //! Mask out all DOFs not owned by the current process with 0.
105  template<typename X>
106  void maskForeignDOFs(X& x) const
107  {
108  using Backend::native;
109  // dispatch to implementation.
110  maskForeignDOFs(istl::container_tag(native(x)),native(x),native(_ranks));
111  }
112 
113  private:
114 
115  // Implementation for block vector; recursively masks blocks.
116  template<typename X, typename Mask>
117  void maskForeignDOFs(istl::tags::block_vector, X& x, const Mask& mask) const
118  {
119  typename Mask::const_iterator mask_it = mask.begin();
120  for (typename X::iterator it = x.begin(),
121  end_it = x.end();
122  it != end_it;
123  ++it, ++mask_it)
124  maskForeignDOFs(istl::container_tag(*it),*it,*mask_it);
125  }
126 
127  // Implementation for field vector, iterates over entries and masks them individually.
128  template<typename X, typename Mask>
129  void maskForeignDOFs(istl::tags::field_vector, X& x, const Mask& mask) const
130  {
131  typename Mask::const_iterator mask_it = mask.begin();
132  for (typename X::iterator it = x.begin(),
133  end_it = x.end();
134  it != end_it;
135  ++it, ++mask_it)
136  *it = (*mask_it == _rank ? *it : typename X::field_type(0));
137  }
138 
139  public:
140 
141  //! Tests whether the given index is owned by this process.
142  bool owned (const ContainerIndex& i) const
143  {
144  return _ranks[i] == _rank;
145  }
146 
147  //! Tests whether the given index belongs to a ghost DOF.
148  bool isGhost(const ContainerIndex& i) const
149  {
150  return _ghosts[i];
151  }
152 
153  //! Calculates the (rank-local) dot product of x and y on the disjoint partition defined by the helper.
154  template<typename X, typename Y>
155  typename PromotionTraits<
156  typename X::field_type,
157  typename Y::field_type
158  >::PromotedType
159  disjointDot(const X& x, const Y& y) const
160  {
161  using Backend::native;
162  return disjointDot(istl::container_tag(native(x)),
163  native(x),
164  native(y),
165  native(_ranks)
166  );
167  }
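 // Each DOF contributes on exactly one rank (its owner in _ranks), so summing the
 // per-rank results over all processes (e.g. via gfs.gridView().comm().sum() in the
 // caller) yields a globally consistent dot product; see the sketch at the end of this file.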
168 
169  private:
170 
171  // Implementation for BlockVector, collects the result of recursively
172  // invoking the algorithm on the vector blocks.
173  template<typename X, typename Y, typename Mask>
174  typename PromotionTraits<
175  typename X::field_type,
176  typename Y::field_type
177  >::PromotedType
178  disjointDot(istl::tags::block_vector, const X& x, const Y& y, const Mask& mask) const
179  {
180  typedef typename PromotionTraits<
181  typename X::field_type,
182  typename Y::field_type
183  >::PromotedType result_type;
184 
185  result_type r(0);
186 
187  typename Y::const_iterator y_it = y.begin();
188  typename Mask::const_iterator mask_it = mask.begin();
189  for (typename X::const_iterator x_it = x.begin(),
190  end_it = x.end();
191  x_it != end_it;
192  ++x_it, ++y_it, ++mask_it)
193  r += disjointDot(istl::container_tag(*x_it),*x_it,*y_it,*mask_it);
194 
195  return r;
196  }
197 
198  // Implementation for FieldVector, iterates over the entries and calls Dune::dot() for DOFs
199  // associated with the current rank.
200  template<typename X, typename Y, typename Mask>
201  typename PromotionTraits<
202  typename X::field_type,
203  typename Y::field_type
204  >::PromotedType
205  disjointDot(istl::tags::field_vector, const X& x, const Y& y, const Mask& mask) const
206  {
207  typedef typename PromotionTraits<
208  typename X::field_type,
209  typename Y::field_type
210  >::PromotedType result_type;
211 
212  result_type r(0);
213 
214  typename Y::const_iterator y_it = y.begin();
215  typename Mask::const_iterator mask_it = mask.begin();
216  for (typename X::const_iterator x_it = x.begin(),
217  end_it = x.end();
218  x_it != end_it;
219  ++x_it, ++y_it, ++mask_it)
220  r += (*mask_it == _rank ? Dune::dot(*x_it,*y_it) : result_type(0));
221 
222  return r;
223  }
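 // Note that Dune::dot() conjugates its first argument for complex field types, so
 // the result is a proper inner product in the complex-valued case as well.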
224 
225  public:
226 
227  //! Returns the MPI rank of this process.
228  RankIndex rank() const
229  {
230  return _rank;
231  }
232 
233 #if HAVE_MPI
234 
235  //! Makes the matrix consistent and creates the parallel information for AMG.
236 
252  template<typename MatrixType, typename Comm>
253  void createIndexSetAndProjectForAMG(MatrixType& m, Comm& c);
254 
255  private:
256 
257  // Checks whether a matrix block is owned by the current process. Used for the AMG
258  // construction and thus assumes a single level of blocking and blocks with ownership
259  // restricted to a single DOF.
260  bool owned_for_amg(std::size_t i) const
261  {
262  return Backend::native(_ranks)[i][0] == _rank;
263  }
264 
265  // Checks whether a matrix block is associated with a ghost entity. Used for the AMG
266  // construction and thus assumes a single level of blocking and blocks with ownership
267  // restricted to a single DOF.
268  bool is_ghost_for_amg(std::size_t i) const
269  {
270  return Backend::native(_ghosts)[i][0];
271  }
272 
273 #endif // HAVE_MPI
274 
275  private:
276 
277  const GFS& _gfs;
278  const RankIndex _rank;
279  RankVector _ranks; // vector to identify unique decomposition
280  GhostVector _ghosts; // vector to identify ghost DOFs
281  int _verbose; // verbosity level
282 
284  InterfaceType _interiorBorder_all_interface;
285 
287  InterfaceType _all_all_interface;
288  };
289 
290 #if HAVE_MPI
291 
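 // The parallel information object c is typically an OwnerOverlapCopyCommunication
 // (cf. CommSelector below); this method rebuilds its index set and remote indices so
 // that they describe the DOF partitioning of the GFS and can be handed to ISTL's
 // parallel AMG.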
292  template<typename GFS>
293  template<typename M, typename C>
294  void ParallelHelper<GFS>::createIndexSetAndProjectForAMG(M& m, C& c)
295  {
296 
297  using Backend::native;
298 
299  const bool is_bcrs_matrix =
300  is_same<
301  typename istl::tags::container<
302  Backend::Native<M>
303  >::type::base_tag,
304  istl::tags::bcrs_matrix
305  >::value;
306 
307  const bool block_type_is_field_matrix =
308  is_same<
309  typename istl::tags::container<
310  typename Backend::Native<M>::block_type
311  >::type::base_tag,
312  istl::tags::field_matrix
313  >::value;
314 
315  // We assume M to be a BCRSMatrix in the following, so better check for that
316  static_assert(is_bcrs_matrix && block_type_is_field_matrix, "matrix structure not compatible with AMG");
317 
318  // ********************************************************************************
319  // In the following, the code will always assume that all DOFs stored in a single
320  // block of the BCRSMatrix are attached to the same entity and can be handled
321  // identically. For that reason, the code often restricts itself to inspecting the
322  // first entry of the blocks in the diverse BlockVectors.
323  // ********************************************************************************
324 
325  typedef typename GFS::Traits::GridViewType GV;
326  typedef typename RankVector::size_type size_type;
327  const GV& gv = _gfs.gridView();
328 
329  // Do we need to communicate at all?
330  const bool need_communication = _gfs.gridView().comm().size() > 1;
331 
332  // First find out which dofs we share with other processors
333  using BoolVector = Backend::Vector<GFS,bool>;
334  BoolVector sharedDOF(_gfs, false);
335 
336  if (need_communication)
337  {
338  SharedDOFDataHandle<GFS,BoolVector> data_handle(_gfs,sharedDOF,false);
339  _gfs.gridView().communicate(data_handle,_all_all_interface,Dune::ForwardCommunication);
340  }
341 
342  // Count shared dofs that we own
343  typedef typename C::ParallelIndexSet::GlobalIndex GlobalIndex;
344  GlobalIndex count = 0;
345 
346  for (size_type i = 0; i < sharedDOF.N(); ++i)
347  if (owned_for_amg(i) && native(sharedDOF)[i][0])
348  ++count;
349 
350  dverb << gv.comm().rank() << ": shared block count is " << count.touint() << std::endl;
351 
352  // Communicate per-rank count of owned and shared DOFs to all processes.
353  std::vector<GlobalIndex> counts(_gfs.gridView().comm().size());
354  _gfs.gridView().comm().allgather(&count, 1, &(counts[0]));
355 
356  // Compute start index start_p = \sum_{i=0}^{i<p} counts_i
357  GlobalIndex start = std::accumulate(counts.begin(),counts.begin() + _rank,GlobalIndex(0));
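 // Example: with counts = {4,2,3}, ranks 0, 1 and 2 start numbering their owned
 // shared blocks at the global indices 0, 4 and 6, respectively.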
358 
360  GIVector scalarIndices(_gfs, std::numeric_limits<GlobalIndex>::max());
361 
362  for (size_type i = 0; i < sharedDOF.N(); ++i)
363  if (owned_for_amg(i) && native(sharedDOF)[i][0])
364  {
365  native(scalarIndices)[i][0] = start;
366  ++start;
367  }
368 
369  // Publish global indices for the shared DOFS to other processors.
370  if (need_communication)
371  {
372  MinDataHandle<GFS,GIVector> data_handle(_gfs,scalarIndices);
373  _gfs.gridView().communicate(data_handle,_interiorBorder_all_interface,Dune::ForwardCommunication);
374  }
375 
376  // Setup the index set
377  c.indexSet().beginResize();
378  for (size_type i=0; i<scalarIndices.N(); ++i)
379  {
380  Dune::OwnerOverlapCopyAttributeSet::AttributeSet attr;
381  if(native(scalarIndices)[i][0] != std::numeric_limits<GlobalIndex>::max())
382  {
383  // global index exists in the index set
384  if (owned_for_amg(i))
385  {
386  // This dof is managed by us.
387  attr = Dune::OwnerOverlapCopyAttributeSet::owner;
388  }
389  else if (is_ghost_for_amg(i) && c.getSolverCategory() == static_cast<int>(SolverCategory::nonoverlapping))
390  {
391  // use attribute overlap for ghosts in nonoverlapping grids
392  attr = Dune::OwnerOverlapCopyAttributeSet::overlap;
393  }
394  else
395  {
396  attr = Dune::OwnerOverlapCopyAttributeSet::copy;
397  }
398  c.indexSet().add(native(scalarIndices)[i][0], typename C::ParallelIndexSet::LocalIndex(i,attr));
399  }
400  }
401  c.indexSet().endResize();
402 
403  // Compute neighbors using communication
404  std::set<int> neighbors;
405 
406  if (need_communication)
407  {
408  GFSNeighborDataHandle<GFS,int> data_handle(_gfs,_rank,neighbors);
409  _gfs.gridView().communicate(data_handle,_all_all_interface,Dune::ForwardCommunication);
410  }
411 
412  c.remoteIndices().setNeighbours(neighbors);
413  c.remoteIndices().template rebuild<false>();
414  }
415 
416 #endif // HAVE_MPI
417 
418  template<int s, bool isFakeMPIHelper>
419  struct CommSelector
420  {
421  typedef Dune::Amg::SequentialInformation type;
422  };
423 
424 
425 #if HAVE_MPI
426 
427  // Need MPI for OwnerOverlapCopyCommunication
428  template<int s>
429  struct CommSelector<s,false>
430  {
431  typedef OwnerOverlapCopyCommunication<bigunsignedint<s>,int> type;
432  };
433 
434 #endif // HAVE_MPI
435 
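 // CommSelector is typically instantiated with the MPIHelper's fake flag, e.g.
 //   typedef typename CommSelector<96,Dune::MPIHelper::isFake>::type Comm;
 // where 96 is an example bit width for the global index type.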
437 
438  } // namespace istl
439  } // namespace PDELab
440 } // namespace Dune
441 
442 #endif // DUNE_PDELAB_BACKEND_ISTL_PARALLELHELPER_HH
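As a usage illustration (not part of the header above), the following sketch combines ParallelHelper::disjointDot() with a collective sum over the grid view's communicator to obtain a globally consistent dot product of two backend vectors; the free function global_dot and its exact signature are assumptions made for this example.

#include <dune/common/promotiontraits.hh>
#include <dune/pdelab/backend/istl/parallelhelper.hh>

// Illustrative only: globally consistent dot product of two backend vectors x and y
// that are attached to the grid function space gfs.
template<typename GFS, typename X, typename Y>
typename Dune::PromotionTraits<
  typename X::field_type,
  typename Y::field_type
  >::PromotedType
global_dot(const GFS& gfs, const X& x, const Y& y)
{
  // Each rank accumulates x_i * y_i only over the DOFs it owns...
  Dune::PDELab::istl::ParallelHelper<GFS> helper(gfs);
  const auto local_result = helper.disjointDot(x,y);
  // ...so a single collective sum yields the global result without double-counting
  // DOFs shared between processes.
  return gfs.gridView().comm().sum(local_result);
}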