#ifndef MUELU_REPARTITIONFACTORY_DEF_HPP
#define MUELU_REPARTITIONFACTORY_DEF_HPP

#include <algorithm>

#include "MueLu_RepartitionFactory_decl.hpp"

#ifdef HAVE_MPI
#include <Teuchos_DefaultMpiComm.hpp>
#include <Teuchos_CommHelpers.hpp>

#include <Xpetra_Map.hpp>
#include <Xpetra_MapFactory.hpp>
#include <Xpetra_VectorFactory.hpp>
#include <Xpetra_Import.hpp>
#include <Xpetra_ImportFactory.hpp>
#include <Xpetra_Export.hpp>
#include <Xpetra_ExportFactory.hpp>
#include <Xpetra_Matrix.hpp>
#include <Xpetra_MatrixFactory.hpp>

#include "MueLu_Utilities.hpp"

#include "MueLu_Level.hpp"
#include "MueLu_MasterList.hpp"
#include "MueLu_Exceptions.hpp"

namespace MueLu {

template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
RCP<const ParameterList> RepartitionFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::GetValidParameterList() const {
  RCP<ParameterList> validParamList = rcp(new ParameterList());

#define SET_VALID_ENTRY(name) validParamList->setEntry(name, MasterList::getEntry(name))
  SET_VALID_ENTRY("repartition: start level");
  SET_VALID_ENTRY("repartition: min rows per proc");
  SET_VALID_ENTRY("repartition: max imbalance");
  SET_VALID_ENTRY("repartition: remap parts");
  SET_VALID_ENTRY("repartition: remap num values");
  SET_VALID_ENTRY("repartition: print partition distribution");
#undef SET_VALID_ENTRY

  validParamList->set< RCP<const FactoryBase> >("A",         Teuchos::null, "Factory of the matrix A");
  validParamList->set< RCP<const FactoryBase> >("Partition", Teuchos::null, "Factory of the partition");

  return validParamList;
}
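// Example only (hypothetical values): these parameters are normally supplied through the
// factory's parameter list, e.g.
//
//   Teuchos::ParameterList repartParams;
//   repartParams.set("repartition: start level",       2);    // no repartitioning on finer levels
//   repartParams.set("repartition: min rows per proc", 800);  // trigger when a rank drops below this
//   repartParams.set("repartition: max imbalance",     1.2);  // trigger when maxNnz/minNnz exceeds this
//   repartParams.set("repartition: remap parts",       true);
//   repartFact->SetParameterList(repartParams);               // repartFact: an RCP<RepartitionFactory>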
template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
void RepartitionFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::DeclareInput(Level& currentLevel) const {
  Input(currentLevel, "A");
  Input(currentLevel, "Partition");
}
template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
void RepartitionFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::Build(Level& currentLevel) const {
  const Teuchos::ParameterList& pL = GetParameterList();

  // Access parameters here to make sure that a corresponding entry exists in the list
  const int    startLevel          = pL.get<int>   ("repartition: start level");
  const LO     minRowsPerProcessor = pL.get<LO>    ("repartition: min rows per proc");
  const double nonzeroImbalance    = pL.get<double>("repartition: max imbalance");
  const bool   remapPartitions     = pL.get<bool>  ("repartition: remap parts");

  RCP<Matrix> A = Get< RCP<Matrix> >(currentLevel, "A");
  // Test 1: skip repartitioning on levels finer than the specified start level
  if (currentLevel.GetLevelID() < startLevel) {
    GetOStream(Statistics0) << "Repartitioning? NO:"
        << "\n  current level = " << Teuchos::toString(currentLevel.GetLevelID())
        << ", first level where repartitioning can happen is " + Teuchos::toString(startLevel) << std::endl;

    Set<RCP<const Import> >(currentLevel, "Importer", Teuchos::null);
    return;
  }
  RCP<const Map> rowMap = A->getRowMap();

  RCP<const Teuchos::Comm<int> > origComm = rowMap->getComm();
  RCP<const Teuchos::Comm<int> > comm     = origComm->duplicate();
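  // NOTE: the duplicated communicator is used for the collectives and raw MPI point-to-point
  // calls below, so that this traffic cannot interfere with other communication on origComm;
  // origComm itself is still used when building the final row map.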
  // Test 2: check whether A is actually distributed, i.e. more than one process owns part of A
  int numActiveProcesses = 0;
  MueLu_sumAll(comm, Teuchos::as<int>((A->getNodeNumRows() > 0) ? 1 : 0), numActiveProcesses);

  if (numActiveProcesses == 1) {
    GetOStream(Statistics0) << "Repartitioning? NO:"
        << "\n  # processes with rows = " << Teuchos::toString(numActiveProcesses) << std::endl;

    Set<RCP<const Import> >(currentLevel, "Importer", Teuchos::null);
    return;
  }
  bool test3 = false, test4 = false;
  std::string msg3, msg4;

  // Test 3: check whether the number of rows on any processor satisfies the minimum rows requirement
  if (minRowsPerProcessor > 0) {
    LO numMyRows = Teuchos::as<LO>(A->getNodeNumRows()), minNumRows, LOMAX = Teuchos::OrdinalTraits<LO>::max();
    LO haveFewRows = (numMyRows < minRowsPerProcessor ? 1 : 0), numWithFewRows = 0;
    MueLu_sumAll(comm, haveFewRows, numWithFewRows);
    MueLu_minAll(comm, (numMyRows > 0 ? numMyRows : LOMAX), minNumRows);

    if (numWithFewRows > 0)
      test3 = true;

    msg3 = "\n  min # rows per proc = " + Teuchos::toString(minNumRows) + ", min allowable = " + Teuchos::toString(minRowsPerProcessor);
  }

  // Test 4: check whether the imbalance in the number of nonzeros per processor exceeds the threshold
  if (!test3) {
    GO minNnz, maxNnz, numMyNnz = Teuchos::as<GO>(A->getNodeNumEntries());
    MueLu_maxAll(comm, numMyNnz, maxNnz);
    MueLu_minAll(comm, (numMyNnz > 0 ? numMyNnz : maxNnz), minNnz); // min nnz over all active processors
    double imbalance = Teuchos::as<double>(maxNnz)/minNnz;

    if (imbalance > nonzeroImbalance)
      test4 = true;

    msg4 = "\n  nonzero imbalance = " + Teuchos::toString(imbalance) + ", max allowable = " + Teuchos::toString(nonzeroImbalance);
  }
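  // Illustration (made-up numbers): with "repartition: max imbalance" = 1.2, a level where the
  // most loaded rank holds 120,000 nonzeros and the least loaded active rank holds 50,000 gives
  // imbalance = 120000/50000 = 2.4 > 1.2, so test4 fires and repartitioning proceeds.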
  if (!test3 && !test4) {
    GetOStream(Statistics0) << "Repartitioning? NO:" << msg3 + msg4 << std::endl;

    Set<RCP<const Import> >(currentLevel, "Importer", Teuchos::null);
    return;
  }

  GetOStream(Statistics0) << "Repartitioning? YES:" << msg3 + msg4 << std::endl;

  GO                    indexBase = rowMap->getIndexBase();
  Xpetra::UnderlyingLib lib       = rowMap->lib();
  int myRank   = comm->getRank();
  int numProcs = comm->getSize();

  RCP<const Teuchos::MpiComm<int> > tmpic = rcp_dynamic_cast<const Teuchos::MpiComm<int> >(comm);
  TEUCHOS_TEST_FOR_EXCEPTION(tmpic == Teuchos::null, Exceptions::RuntimeError,
                             "Cannot cast base Teuchos::Comm to Teuchos::MpiComm object.");
  RCP<const Teuchos::OpaqueWrapper<MPI_Comm> > rawMpiComm = tmpic->getRawMpiComm();
  // Determine the number of partitions
  GO numPartitions;
  if (currentLevel.IsAvailable("number of partitions")) {
    numPartitions = currentLevel.Get<GO>("number of partitions");
    GetOStream(Warnings0) << "Using user-provided \"number of partitions\", the performance is unknown" << std::endl;

  } else {
    if (Teuchos::as<GO>(A->getGlobalNumRows()) < minRowsPerProcessor) {
      // The system is too small: migrate it to a single processor
      numPartitions = 1;

    } else {
      // Give each processor approximately minRowsPerProcessor rows
      numPartitions = A->getGlobalNumRows() / minRowsPerProcessor;
    }
    numPartitions = std::min(numPartitions, Teuchos::as<GO>(numProcs));

    currentLevel.Set("number of partitions", numPartitions, NoFactory::get());
  }
  GetOStream(Statistics0) << "Number of partitions to use = " << numPartitions << std::endl;
  // Construct the decomposition vector: entry i holds the partition id assigned to local row i
  RCP<GOVector> decomposition;
  if (numPartitions == 1) {
    // Trivial case: a single partition, so the decomposition is all zeros and the call to the
    // repartitioning interface can be skipped
    GetOStream(Warnings0) << "Only one partition: Skip call to the repartitioner." << std::endl;
    decomposition = Xpetra::VectorFactory<GO, LO, GO, NO>::Build(A->getRowMap(), true);

  } else {
    decomposition = Get<RCP<GOVector> >(currentLevel, "Partition");

    if (decomposition.is_null()) {
      GetOStream(Warnings0) << "No repartitioning necessary: partitions were left unchanged by the repartitioner" << std::endl;
      Set<RCP<const Import> >(currentLevel, "Importer", Teuchos::null);
      return;
    }
  }
  // Remap partition ids onto MPI ranks so that the data movement during the actual
  // redistribution is reduced (see DeterminePartitionPlacement below)
  if (remapPartitions) {
    DeterminePartitionPlacement(*A, *decomposition, numPartitions);
  }
  // From here on, each decomposition entry is interpreted as the destination MPI rank of the
  // corresponding local row (partition id == destination rank)
  ArrayRCP<const GO> decompEntries;
  if (decomposition->getLocalLength() > 0)
    decompEntries = decomposition->getData(0);

#ifdef HAVE_MUELU_DEBUG
  // Check that all partition ids are valid ranks
  int incorrectRank = -1;
  for (int i = 0; i < decompEntries.size(); i++)
    if (decompEntries[i] >= numProcs || decompEntries[i] < 0) {
      incorrectRank = myRank;
      break;
    }

  int incorrectGlobalRank = -1;
  MueLu_maxAll(comm, incorrectRank, incorrectGlobalRank);
  TEUCHOS_TEST_FOR_EXCEPTION(incorrectGlobalRank > -1, Exceptions::RuntimeError,
                             "pid " + Teuchos::toString(incorrectGlobalRank) + " encountered an out-of-range partition number");
#endif
  Array<GO> myGIDs;
  myGIDs.reserve(decomposition->getLocalLength());

  // Construct a mapping: destination rank -> GIDs I currently own that must move to that rank.
  // GIDs that stay on this rank are collected separately in myGIDs.
  typedef std::map<GO, Array<GO> > map_type;
  map_type sendMap;
  for (LO i = 0; i < decompEntries.size(); i++) {
    GO id  = decompEntries[i];
    GO GID = rowMap->getGlobalElement(i);

    if (id == myRank)
      myGIDs     .push_back(GID);
    else
      sendMap[id].push_back(GID);
  }
  decompEntries = Teuchos::null;

  GO numLocalKept = myGIDs.size(), numGlobalKept, numGlobalRows = A->getGlobalNumRows();
  MueLu_sumAll(comm, numLocalKept, numGlobalKept);
  GetOStream(Statistics2) << "Unmoved rows: " << numGlobalKept << " / " << numGlobalRows
                          << " (" << 100*Teuchos::as<double>(numGlobalKept)/numGlobalRows << "%)" << std::endl;
  int numSend = sendMap.size(), numRecv;

  // Step 1: determine how many ranks will send data to me.
  // Each rank builds an overlapping map of the part ids it sends to ("partsIHave") and a
  // one-entry map of the part id it owns ("partsIOwn", part id == rank). Exporting a vector
  // of ones with ADD then counts, for every rank, the number of senders targeting it.
  Array<GO> myParts(numSend), myPart(1);
  myPart[0] = myRank;
  int cnt = 0;
  for (typename map_type::const_iterator it = sendMap.begin(); it != sendMap.end(); it++)
    myParts[cnt++] = it->first;

  // partsIndexBase starts from zero, as the processor ids start from zero
  GO partsIndexBase = 0;
  RCP<Map>    partsIHave  = MapFactory   ::Build(lib, Teuchos::OrdinalTraits<Xpetra::global_size_t>::invalid(), myParts(), partsIndexBase, comm);
  RCP<Map>    partsIOwn   = MapFactory   ::Build(lib, numProcs, myPart(), partsIndexBase, comm);
  RCP<Export> partsExport = ExportFactory::Build(partsIHave, partsIOwn);

  RCP<GOVector> partsISend    = Xpetra::VectorFactory<GO, LO, GO, NO>::Build(partsIHave);
  RCP<GOVector> numPartsIRecv = Xpetra::VectorFactory<GO, LO, GO, NO>::Build(partsIOwn);
  if (numSend) {
    ArrayRCP<GO> partsISendData = partsISend->getDataNonConst(0);
    for (int i = 0; i < numSend; i++)
      partsISendData[i] = 1;
  }
  (numPartsIRecv->getDataNonConst(0))[0] = 0;

  numPartsIRecv->doExport(*partsISend, *partsExport, Xpetra::ADD);
  numRecv = (numPartsIRecv->getData(0))[0];
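  // Example (hypothetical, 3 ranks): if rank 0 sends to parts {1,2} and rank 1 sends to part {2},
  // the ADD export delivers 1 to rank 1 and 2 to rank 2, i.e. numRecv on each rank equals the
  // number of incoming point-to-point messages the probe/recv loop below must expect.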
  // Step 2: exchange the GIDs themselves with raw MPI point-to-point messages.
  // MpiTypeTraits is the helper in this file that maps GO to the corresponding MPI_Datatype.
  MPI_Datatype MpiType = MpiTypeTraits<GO>::getType();
  const int msgTag = 12345;  // arbitrary tag; it only needs to be consistent across ranks

  // Post non-blocking sends: one message per destination rank, containing the GIDs that move there
  Array<MPI_Request> sendReqs(numSend);
  cnt = 0;
  for (typename map_type::iterator it = sendMap.begin(); it != sendMap.end(); it++)
    MPI_Isend(static_cast<void*>(it->second.getRawPtr()), it->second.size(), MpiType, Teuchos::as<GO>(it->first), msgTag, *rawMpiComm, &sendReqs[cnt++]);

  // Receive from the numRecv senders determined above
  map_type recvMap;
  size_t totalGIDs = myGIDs.size();
  for (int i = 0; i < numRecv; i++) {
    MPI_Status status;
    MPI_Probe(MPI_ANY_SOURCE, msgTag, *rawMpiComm, &status);

    // Get the sender rank and message size from the probed status
    int fromRank = status.MPI_SOURCE, count;
    MPI_Get_count(&status, MpiType, &count);

    recvMap[fromRank].resize(count);
    MPI_Recv(static_cast<void*>(recvMap[fromRank].getRawPtr()), count, MpiType, fromRank, msgTag, *rawMpiComm, &status);

    totalGIDs += count;
  }

  // Wait on the send requests
  if (numSend) {
    Array<MPI_Status> sendStatuses(numSend);
    MPI_Waitall(numSend, sendReqs.getRawPtr(), sendStatuses.getRawPtr());
  }
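  // Completing the sends here guarantees that the buffers inside sendMap are no longer in use
  // by MPI before they go out of scope or are modified below.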
  // Merge the received GIDs into myGIDs
  myGIDs.reserve(totalGIDs);
  for (typename map_type::const_iterator it = recvMap.begin(); it != recvMap.end(); it++) {
    int offset = myGIDs.size(), len = it->second.size();
    if (len) {
      myGIDs.resize(offset + len);
      memcpy(myGIDs.getRawPtr() + offset, it->second.getRawPtr(), len*sizeof(GO));
    }
  }
  std::sort(myGIDs.begin(), myGIDs.end());
  // Step 3: construct the new row map and the importer from the old distribution to the new one
  RCP<Map> newRowMap = MapFactory::Build(lib, rowMap->getGlobalNumElements(), myGIDs(), indexBase, origComm);

  RCP<const Import> rowMapImporter;
  rowMapImporter = ImportFactory::Build(rowMap, newRowMap);

  Set(currentLevel, "Importer", rowMapImporter);
  if (pL.get<bool>("repartition: print partition distribution") && IsPrint(Statistics2)) {
    // Print a compact grid showing which ranks own rows after repartitioning
    GetOStream(Statistics2) << "Partition distribution over cores (ownership is indicated by '+')" << std::endl;

    char amActive = (myGIDs.size() ? 1 : 0);
    std::vector<char> areActive(numProcs, 0);
    MPI_Gather(&amActive, 1, MPI_CHAR, &areActive[0], 1, MPI_CHAR, 0, *rawMpiComm);

    int rowWidth = std::min(Teuchos::as<int>(ceil(sqrt(numProcs))), 100);
    for (int proc = 0; proc < numProcs; proc += rowWidth) {
      for (int j = 0; j < rowWidth; j++)
        if (proc + j < numProcs)
          GetOStream(Statistics2) << (areActive[proc + j] ? "+" : ".");
        else
          GetOStream(Statistics2) << " ";

      GetOStream(Statistics2) << " " << proc << ":" << std::min(proc + rowWidth, numProcs) - 1 << std::endl;
    }
  }
}
// Helper: an edge of the bipartite (rank, part) graph with weight v
template<typename T, typename W>
struct Triplet {
  T i, j;
  W v;
};

// Sort predicate: order triplets by decreasing weight
template<typename T, typename W>
static bool compareTriplets(const Triplet<T,W>& a, const Triplet<T,W>& b) {
  return (a.v > b.v);
}
template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
void RepartitionFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::DeterminePartitionPlacement(const Matrix& A, GOVector& decomposition, GO numPartitions) const {
  RCP<const Map> rowMap = A.getRowMap();

  RCP<const Teuchos::Comm<int> > comm = rowMap->getComm()->duplicate();
  int numProcs = comm->getSize();

  RCP<const Teuchos::MpiComm<int> > tmpic = rcp_dynamic_cast<const Teuchos::MpiComm<int> >(comm);
  TEUCHOS_TEST_FOR_EXCEPTION(tmpic == Teuchos::null, Exceptions::RuntimeError,
                             "Cannot cast base Teuchos::Comm to Teuchos::MpiComm object.");
  RCP<const Teuchos::OpaqueWrapper<MPI_Comm> > rawMpiComm = tmpic->getRawMpiComm();

  const Teuchos::ParameterList& pL = GetParameterList();

  // Only the maxLocal heaviest local edges are exchanged; this keeps the gathered bipartite
  // graph (and the MPI_Allgather below) small at large rank counts.
  const int maxLocal = pL.get<int>("repartition: remap num values");
  const int dataSize = 2*maxLocal;

  ArrayRCP<GO> decompEntries;
  if (decomposition.getLocalLength() > 0)
    decompEntries = decomposition.getDataNonConst(0);
  // Accumulate local edge weights: for each part id appearing on this rank, sum the number of
  // nonzeros of the local rows assigned to it
  std::map<GO,GO> lEdges;
  for (LO i = 0; i < decompEntries.size(); i++)
    lEdges[decompEntries[i]] += A.getNumEntriesInLocalRow(i);
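  // Example (hypothetical): if this rank's rows assigned to part 7 hold 12,000 nonzeros in total,
  // then lEdges[7] == 12000. The nonzero count serves as the edge weight, approximating the amount
  // of matrix data that would have to move if part 7 ended up on another rank.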
  // Reverse the map (weight -> part id) so that edges can be traversed in order of weight;
  // a multimap is needed since different parts may have equal weights
  std::multimap<GO,GO> revlEdges;
  for (typename std::map<GO,GO>::const_iterator it = lEdges.begin(); it != lEdges.end(); it++)
    revlEdges.insert(std::make_pair(it->second, it->first));
  // Pack the (up to) maxLocal heaviest edges as <part id, weight> pairs; unused slots stay -1
  Array<GO> lData(dataSize, -1), gData(numProcs * dataSize);
  int numEdges = 0;
  for (typename std::multimap<GO,GO>::reverse_iterator rit = revlEdges.rbegin(); rit != revlEdges.rend() && numEdges < maxLocal; rit++) {
    lData[2*numEdges+0] = rit->second;   // part id
    lData[2*numEdges+1] = rit->first;    // edge weight
    numEdges++;
  }

  // Gather everybody's heaviest edges; every rank contributes exactly dataSize entries,
  // so the sending rank of an entry can be recovered from its offset in gData
  MPI_Datatype MpiType = MpiTypeTraits<GO>::getType();
  MPI_Allgather(static_cast<void*>(lData.getRawPtr()), dataSize, MpiType, static_cast<void*>(gData.getRawPtr()), dataSize, MpiType, *rawMpiComm);

  // Construct the global set of triplets (rank, part, weight), skipping the -1 padding
  std::vector<Triplet<int,int> > gEdges(numProcs * maxLocal);
  size_t k = 0;
  for (LO i = 0; i < gData.size(); i += 2) {
    GO part   = gData[i+0];
    GO weight = gData[i+1];
    if (part != -1) {
      gEdges[k].i = i/dataSize;   // sending rank, recovered from the offset
      gEdges[k].j = part;
      gEdges[k].v = weight;
      k++;
    }
  }
  gEdges.resize(k);

  // Sort edges by decreasing weight
  std::sort(gEdges.begin(), gEdges.end(), compareTriplets<int,int>);
  // Greedy matching: walk the edges from heaviest to lightest and match a part to a rank
  // whenever both are still free
  std::map<int,int> match;
  std::vector<char> matchedRanks(numProcs, 0), matchedParts(numProcs, 0);
  int numMatched = 0;
  for (typename std::vector<Triplet<int,int> >::const_iterator it = gEdges.begin(); it != gEdges.end(); it++) {
    GO rank = it->i;
    GO part = it->j;
    if (matchedRanks[rank] == 0 && matchedParts[part] == 0) {
      matchedRanks[rank] = 1;
      matchedParts[part] = 1;
      match[part] = rank;
      numMatched++;
    }
  }
  GetOStream(Statistics0) << "Number of unassigned partitions before cleanup stage: " << (numPartitions - numMatched) << " / " << numPartitions << std::endl;
  // Cleanup stage: assign every still-unmatched part to the first rank that has no part yet
  for (int part = 0, matcher = 0; part < numProcs; part++)
    if (match.count(part) == 0) {
      // Find the first unmatched rank
      while (matchedRanks[matcher])
        matcher++;

      match[part] = matcher++;
    }

  // Permute the decomposition entries: part ids become the ranks they were matched to
  for (LO i = 0; i < decompEntries.size(); i++)
    decompEntries[i] = match[decompEntries[i]];
}
} // namespace MueLu

#endif //ifdef HAVE_MPI
#endif // MUELU_REPARTITIONFACTORY_DEF_HPP