MueLu_RepartitionUtilities_def.hpp
// @HEADER
// *****************************************************************************
// MueLu: A package for multigrid based preconditioning
//
// Copyright 2012 NTESS and the MueLu contributors.
// SPDX-License-Identifier: BSD-3-Clause
// *****************************************************************************
// @HEADER

#ifndef MUELU_REPARTITIONUTILITIES_DEF_HPP
#define MUELU_REPARTITIONUTILITIES_DEF_HPP

#include <algorithm>
#include <cstring>  // memcpy
#include <iostream>
#include <sstream>

#include "MueLu_RepartitionUtilities_decl.hpp"  // TMP JG NOTE: before other includes, otherwise I cannot test the fwd declaration in _def

#ifdef HAVE_MPI
#include <Teuchos_DefaultMpiComm.hpp>
#include <Teuchos_CommHelpers.hpp>
#include <Teuchos_Details_MpiTypeTraits.hpp>

#include <Xpetra_Map.hpp>
#include <Xpetra_MapFactory.hpp>
#include <Xpetra_MultiVectorFactory.hpp>
#include <Xpetra_VectorFactory.hpp>
#include <Xpetra_Import.hpp>
#include <Xpetra_ImportFactory.hpp>
#include <Xpetra_Export.hpp>
#include <Xpetra_ExportFactory.hpp>

#include "MueLu_Utilities.hpp"

#include "MueLu_CloneRepartitionInterface.hpp"

#include "MueLu_Level.hpp"
#include "MueLu_MasterList.hpp"
#include "MueLu_Monitor.hpp"
#include "MueLu_PerfUtils.hpp"

namespace MueLu {

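// ConstructGIDs: given a "decomposition" vector that assigns each local row to a
// target rank (part), build the sorted list of GIDs this rank will own after
// repartitioning, and return it together with the number of GIDs kept from the
// rank's current rows.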
template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
std::tuple<Teuchos::Array<GlobalOrdinal>, GlobalOrdinal>
RepartitionUtilities<Scalar, LocalOrdinal, GlobalOrdinal, Node>::ConstructGIDs(RCP<GOVector> decomposition) {
  ArrayRCP<const GO> decompEntries;
  if (decomposition->getLocalLength() > 0)
    decompEntries = decomposition->getData(0);

  auto rowMap = decomposition->getMap();
  auto comm = rowMap->getComm();
  const auto myRank = comm->getRank();
  const auto numProcs = comm->getSize();
  Xpetra::UnderlyingLib lib = rowMap->lib();

  Array<GO> myGIDs;
  myGIDs.reserve(decomposition->getLocalLength());

  // Step 0: Construct mapping
  //   part number -> GIDs I own which belong to this part
  // NOTE: my own part GIDs are not part of the map
  typedef std::map<GO, Array<GO> > map_type;
  map_type sendMap;
  for (LO i = 0; i < decompEntries.size(); i++) {
    GO id = decompEntries[i];
    GO GID = rowMap->getGlobalElement(i);

    if (id == myRank)
      myGIDs.push_back(GID);
    else
      sendMap[id].push_back(GID);
  }
  decompEntries = Teuchos::null;

  int numSend = sendMap.size(), numRecv;

  // Arrayify map keys
  Array<GO> myParts(numSend), myPart(1);
  int cnt = 0;
  myPart[0] = myRank;
  for (typename map_type::const_iterator it = sendMap.begin(); it != sendMap.end(); it++)
    myParts[cnt++] = it->first;

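  // Number of rows this rank keeps for itself (rows whose part id equals myRank)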
  const GO numLocalKept = myGIDs.size();

  // Step 1: Find out how many processors send me data
  // partsIndexBase starts from zero, as the processor ids start from zero
  {
    GO partsIndexBase = 0;
    RCP<Map> partsIHave = MapFactory::Build(lib, Teuchos::OrdinalTraits<Xpetra::global_size_t>::invalid(), myParts(), partsIndexBase, comm);
    RCP<Map> partsIOwn = MapFactory::Build(lib, numProcs, myPart(), partsIndexBase, comm);
    RCP<Export> partsExport = ExportFactory::Build(partsIHave, partsIOwn);

    RCP<GOVector> partsISend = Xpetra::VectorFactory<GO, LO, GO, NO>::Build(partsIHave);
    RCP<GOVector> numPartsIRecv = Xpetra::VectorFactory<GO, LO, GO, NO>::Build(partsIOwn);
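    // Mark each part (target rank) we send to with a 1; the ADD export below then
    // counts, on each rank, how many processes will be sending data to it.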
    if (numSend) {
      ArrayRCP<GO> partsISendData = partsISend->getDataNonConst(0);
      for (int i = 0; i < numSend; i++)
        partsISendData[i] = 1;
    }
    (numPartsIRecv->getDataNonConst(0))[0] = 0;

    numPartsIRecv->doExport(*partsISend, *partsExport, Xpetra::ADD);
    numRecv = (numPartsIRecv->getData(0))[0];
  }

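  // Extract the raw MPI communicator so we can post point-to-point sends/receives directly.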
  RCP<const Teuchos::MpiComm<int> > tmpic = rcp_dynamic_cast<const Teuchos::MpiComm<int> >(comm);
  TEUCHOS_TEST_FOR_EXCEPTION(tmpic == Teuchos::null, Exceptions::RuntimeError, "Cannot cast base Teuchos::Comm to Teuchos::MpiComm object.");
  RCP<const Teuchos::OpaqueWrapper<MPI_Comm> > rawMpiComm = tmpic->getRawMpiComm();

  // Step 2: Get my GIDs from everybody else
  MPI_Datatype MpiType = Teuchos::Details::MpiTypeTraits<GO>::getType();
  int msgTag = 12345;  // TODO: use Comm::dup for all internal messaging

  // Post sends
  Array<MPI_Request> sendReqs(numSend);
  cnt = 0;
  for (typename map_type::iterator it = sendMap.begin(); it != sendMap.end(); it++)
    MPI_Isend(static_cast<void*>(it->second.getRawPtr()), it->second.size(), MpiType, Teuchos::as<GO>(it->first), msgTag, *rawMpiComm, &sendReqs[cnt++]);

  map_type recvMap;
  size_t totalGIDs = myGIDs.size();
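  // Receive one GID list per sending rank: probe first to learn the source rank and message length.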
  for (int i = 0; i < numRecv; i++) {
    MPI_Status status;
    MPI_Probe(MPI_ANY_SOURCE, msgTag, *rawMpiComm, &status);

    // Get rank and number of elements from status
    int fromRank = status.MPI_SOURCE, count;
    MPI_Get_count(&status, MpiType, &count);

    recvMap[fromRank].resize(count);
    MPI_Recv(static_cast<void*>(recvMap[fromRank].getRawPtr()), count, MpiType, fromRank, msgTag, *rawMpiComm, &status);

    totalGIDs += count;
  }

  // Do waits on send requests
  if (numSend) {
    Array<MPI_Status> sendStatuses(numSend);
    MPI_Waitall(numSend, sendReqs.getRawPtr(), sendStatuses.getRawPtr());
  }

  // Merge GIDs
  myGIDs.reserve(totalGIDs);
  for (typename map_type::const_iterator it = recvMap.begin(); it != recvMap.end(); it++) {
    int offset = myGIDs.size(), len = it->second.size();
    if (len) {
      myGIDs.resize(offset + len);
      memcpy(myGIDs.getRawPtr() + offset, it->second.getRawPtr(), len * sizeof(GO));
    }
  }
  // NOTE 2: The general sorting algorithm could be sped up by using the knowledge that original myGIDs and all received chunks
  //         (i.e. it->second) are sorted. Therefore, a merge sort would work well in this situation.
  std::sort(myGIDs.begin(), myGIDs.end());

  return std::make_tuple(myGIDs, numLocalKept);
}

}  // namespace MueLu

#endif  // ifdef HAVE_MPI

#endif  // MUELU_REPARTITIONUTILITIES_DEF_HPP