dune-grid  2.4
parmetisgridpartitioner.hh
#ifndef DUNE_GRID_UTILITY_PARMETISGRIDPARTITIONER_HH
#define DUNE_GRID_UTILITY_PARMETISGRIDPARTITIONER_HH
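
/** \file
 *  \brief Compute an element partitioning of a Dune grid view using ParMETIS
 */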
#include <algorithm>
#include <vector>

#include <dune/common/parallel/mpihelper.hh>
#include <dune/common/exceptions.hh>

#include <dune/geometry/referenceelements.hh>

#include <dune/grid/utility/globalindexset.hh>
#include <dune/grid/common/mcmgmapper.hh>

#if HAVE_PARMETIS
#include <parmetis.h>

namespace Dune
{

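  /** \brief Partitioner that computes a target rank for each element of a grid view
   *         by calling the parallel graph partitioner ParMETIS
   */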
  template<class GridView>
  struct ParMetisGridPartitioner {
#if PARMETIS_MAJOR_VERSION < 4
    typedef idxtype idx_t;
    typedef float real_t;
#endif

    typedef typename GridView::template Codim<0>::Iterator ElementIterator;
    typedef typename GridView::template Codim<0>::template Partition<Interior_Partition>::Iterator InteriorElementIterator;
    typedef typename GridView::IntersectionIterator IntersectionIterator;

    enum {
      dimension = GridView::dimension
    };

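    /** \brief Create an initial partitioning of a Dune grid, ignoring communication cost
     *
     *  The grid is expected to be stored entirely on rank 0; that rank calls
     *  ParMETIS_V3_PartMeshKway to partition the mesh into one part per process.
     *
     *  \param gv        The grid view to be partitioned
     *  \param mpihelper The MPIHelper object, needed to get the MPI communicator
     *
     *  \return One entry per element of gv, containing the rank the element is assigned to
     */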
    static std::vector<unsigned> partition(const GridView& gv, const Dune::MPIHelper& mpihelper) {
      const unsigned numElements = gv.size(0);

      std::vector<unsigned> part(numElements);

      // Setup parameters for ParMETIS
      idx_t wgtflag = 0; // we don't use weights
      idx_t numflag = 0; // we are using C-style arrays
      idx_t ncon = 1; // number of balance constraints
      idx_t ncommonnodes = 2; // number of nodes elements must have in common to be considered adjacent to each other
      idx_t options[4] = {0, 0, 0, 0}; // use default values for random seed, output and coupling
      idx_t edgecut; // will store number of edges cut by partition
      idx_t nparts = mpihelper.size(); // number of parts equals number of processes
      std::vector<real_t> tpwgts(ncon*nparts, 1./nparts); // load per subdomain and weight (same load on every process)
      std::vector<real_t> ubvec(ncon, 1.05); // weight tolerance (same weight tolerance for every weight there is)

      // The difference elmdist[i+1] - elmdist[i] is the number of elements that are on process i
      std::vector<idx_t> elmdist(nparts+1);
      elmdist[0] = 0;
      std::fill(elmdist.begin()+1, elmdist.end(), gv.size(0)); // all elements are on process zero

      // Create and fill the arrays "eptr" and "eind": eptr[i] is the index in eind at which the
      // vertex numbers of the i-th element start, i.e. eind[eptr[i]] to eind[eptr[i+1]-1] are the
      // vertex numbers of the i-th element
      std::vector<idx_t> eptr, eind;
      int numVertices = 0;
      eptr.push_back(numVertices);

      for (InteriorElementIterator eIt = gv.template begin<0, Interior_Partition>(); eIt != gv.template end<0, Interior_Partition>(); ++eIt) {
        const int curNumVertices = ReferenceElements<double, dimension>::general(eIt->type()).size(dimension);

        numVertices += curNumVertices;
        eptr.push_back(numVertices);

        for (int k = 0; k < curNumVertices; ++k)
          eind.push_back(gv.indexSet().subIndex(*eIt, k, dimension));
      }

      // Partition mesh using ParMETIS
      if (0 == mpihelper.rank()) {
        MPI_Comm comm = Dune::MPIHelper::getLocalCommunicator();

#if PARMETIS_MAJOR_VERSION >= 4
        const int OK =
#endif
          ParMETIS_V3_PartMeshKway(elmdist.data(), eptr.data(), eind.data(), NULL, &wgtflag, &numflag,
                                   &ncon, &ncommonnodes, &nparts, tpwgts.data(), ubvec.data(),
                                   options, &edgecut, reinterpret_cast<idx_t*>(part.data()), &comm);

#if PARMETIS_MAJOR_VERSION >= 4
        if (OK != METIS_OK)
          DUNE_THROW(Dune::Exception, "ParMETIS returned an error code.");
#endif
      }

      return part;
    }

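    /** \brief Create a repartitioning of an already distributed Dune grid
     *
     *  Builds the distributed dual graph of the interior elements and calls
     *  ParMETIS_V3_AdaptiveRepart, which balances the load while trying to keep
     *  the redistribution cost low.
     *
     *  \param gv        The grid view to be repartitioned
     *  \param mpihelper The MPIHelper object, needed to get the MPI communicator
     *  \param itr       Ratio of inter-process communication time to data redistribution time,
     *                   passed on to ParMETIS
     *
     *  \return One entry per element of gv (ghost elements get a dummy value of 0),
     *          containing the rank the element is assigned to
     */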
    static std::vector<unsigned> repartition(const GridView& gv, const Dune::MPIHelper& mpihelper, real_t itr = 1000) {

      // Create global index map
      GlobalIndexSet<GridView> globalIndex(gv,0);

      int numElements = std::distance(gv.template begin<0, Interior_Partition>(),
                                      gv.template end<0, Interior_Partition>());

      std::vector<unsigned> interiorPart(numElements);

      // Setup parameters for ParMETIS
      idx_t wgtflag = 0; // we don't use weights
      idx_t numflag = 0; // we are using C-style arrays
      idx_t ncon = 1; // number of balance constraints
      idx_t options[4] = {0, 0, 0, 0}; // use default values for random seed, output and coupling
      idx_t edgecut; // will store number of edges cut by partition
      idx_t nparts = mpihelper.size(); // number of parts equals number of processes
      std::vector<real_t> tpwgts(ncon*nparts, 1./nparts); // load per subdomain and weight (same load on every process)
      std::vector<real_t> ubvec(ncon, 1.05); // weight tolerance (same weight tolerance for every weight there is)

      MPI_Comm comm = Dune::MPIHelper::getCommunicator();

      // Make the number of interior elements of each processor available to all processors
      std::vector<int> offset(gv.comm().size());
      std::fill(offset.begin(), offset.end(), 0);

      gv.comm().template allgather<int>(&numElements, 1, offset.data());

      // The difference vtxdist[i+1] - vtxdist[i] is the number of elements that are on process i
      std::vector<idx_t> vtxdist(gv.comm().size()+1);
      vtxdist[0] = 0;

      for (unsigned int i=1; i<vtxdist.size(); ++i)
        vtxdist[i] = vtxdist[i-1] + offset[i-1];

      // Set up element adjacency lists
      std::vector<idx_t> xadj, adjncy;
      xadj.push_back(0);

      for (InteriorElementIterator eIt = gv.template begin<0, Interior_Partition>(); eIt != gv.template end<0, Interior_Partition>(); ++eIt)
      {
        size_t numNeighbors = 0;

        for (IntersectionIterator iIt = gv.ibegin(*eIt); iIt != gv.iend(*eIt); ++iIt) {
          if (iIt->neighbor()) {
            adjncy.push_back(globalIndex.index(*iIt->outside()));

            ++numNeighbors;
          }
        }

        xadj.push_back(xadj.back() + numNeighbors);
      }

#if PARMETIS_MAJOR_VERSION >= 4
      const int OK =
#endif
        ParMETIS_V3_AdaptiveRepart(vtxdist.data(), xadj.data(), adjncy.data(), NULL, NULL, NULL,
                                   &wgtflag, &numflag, &ncon, &nparts, tpwgts.data(), ubvec.data(),
                                   &itr, options, &edgecut, reinterpret_cast<idx_t*>(interiorPart.data()), &comm);

#if PARMETIS_MAJOR_VERSION >= 4
      if (OK != METIS_OK)
        DUNE_THROW(Dune::Exception, "ParMETIS returned error code " << OK);
#endif

      // At this point, interiorPart contains a target rank for each interior element, and they are sorted
      // by the order in which the grid view traverses them.  Now we need to do two things:
      // a) Add additional dummy entries for the ghost elements
      // b) Use the element index for the actual ordering.  Since there may be different types of elements,
      //    we cannot use the index set directly, but have to go through a Mapper.

      typedef MultipleCodimMultipleGeomTypeMapper<GridView, MCMGElementLayout> ElementMapper;
      ElementMapper elementMapper(gv);

      std::vector<unsigned int> part(gv.size(0));
      std::fill(part.begin(), part.end(), 0);
      unsigned int c = 0;
      for (InteriorElementIterator eIt = gv.template begin<0, Interior_Partition>();
           eIt != gv.template end<0, Interior_Partition>();
           ++eIt)
      {
        part[elementMapper.index(*eIt)] = interiorPart[c++];
      }
      return part;
    }
  };

} // namespace Dune

#else // HAVE_PARMETIS
#warning "PARMETIS was not found, please check your configuration"
#endif

#endif // DUNE_GRID_UTILITY_PARMETISGRIDPARTITIONER_HH
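
A minimal usage sketch, not part of the header above: it assumes a grid manager such as Dune::UGGrid that initially keeps the whole grid on rank 0 and offers a loadBalance overload taking one target rank per element. The grid type, dimension, resolution and that overload are illustrative assumptions; check the interface of the grid implementation you actually use.

#include <array>
#include <memory>
#include <vector>

#include <dune/common/fvector.hh>
#include <dune/common/parallel/mpihelper.hh>
#include <dune/grid/uggrid.hh>
#include <dune/grid/utility/structuredgridfactory.hh>
#include <dune/grid/utility/parmetisgridpartitioner.hh>

int main(int argc, char** argv)
{
  Dune::MPIHelper& mpihelper = Dune::MPIHelper::instance(argc, argv);

  typedef Dune::UGGrid<2> Grid;            // assumption: UGGrid built with parallel support
  typedef Grid::LeafGridView GridView;

  // Create a small structured grid; with UGGrid it initially lives on rank 0
  const Dune::FieldVector<double,2> lower(0.0), upper(1.0);
  std::array<unsigned int,2> elements = {{16, 16}};
  auto grid = Dune::StructuredGridFactory<Grid>::createCubeGrid(lower, upper, elements);

  GridView gv = grid->leafGridView();

  // Compute a target rank for every element of the (still serial) grid ...
  std::vector<unsigned> part =
      Dune::ParMetisGridPartitioner<GridView>::partition(gv, mpihelper);

  // ... and redistribute the grid accordingly (assumes the UGGrid overload that
  // takes one target rank per element and a start level)
  grid->loadBalance(part, 0);

  return 0;
}

After adaptive refinement, repartition() can be used in the same way: it returns a new target-rank vector for the already distributed grid, which is again handed to the grid's loadBalance method.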