Xpetra Version of the Day
Xpetra_TripleMatrixMultiply_decl.hpp
// @HEADER
// *****************************************************************************
// Xpetra: A linear algebra interface package
//
// Copyright 2012 NTESS and the Xpetra contributors.
// SPDX-License-Identifier: BSD-3-Clause
// *****************************************************************************
// @HEADER

#ifndef PACKAGES_XPETRA_SUP_UTILS_XPETRA_TRIPLEMATRIXMULTIPLY_DECL_HPP_
#define PACKAGES_XPETRA_SUP_UTILS_XPETRA_TRIPLEMATRIXMULTIPLY_DECL_HPP_

#include "Xpetra_ConfigDefs.hpp"

// #include "Xpetra_BlockedCrsMatrix.hpp"
#include "Xpetra_CrsMatrixWrap.hpp"
#include "Xpetra_MapExtractor.hpp"
#include "Xpetra_Map.hpp"
#include "Xpetra_MatrixFactory.hpp"
#include "Xpetra_Matrix.hpp"
#include "Xpetra_StridedMapFactory.hpp"
#include "Xpetra_StridedMap.hpp"
#include "Xpetra_IO.hpp"

#ifdef HAVE_XPETRA_TPETRA
#include <TpetraExt_TripleMatrixMultiply.hpp>
#include <Xpetra_TpetraCrsMatrix.hpp>
#include <Tpetra_BlockCrsMatrix.hpp>
#include <Tpetra_BlockCrsMatrix_Helpers.hpp>
// #include <Xpetra_TpetraMultiVector.hpp>
// #include <Xpetra_TpetraVector.hpp>
#endif  // HAVE_XPETRA_TPETRA
namespace Xpetra {

template <class Scalar,
          class LocalOrdinal /*= int*/,
          class GlobalOrdinal /*= LocalOrdinal*/,
          class Node /*= Tpetra::KokkosClassic::DefaultNode::DefaultNodeType*/>
class TripleMatrixMultiply {
#undef XPETRA_TRIPLEMATRIXMULTIPLY_SHORT
#include "Xpetra_UseShortNames.hpp"

 public:
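  /*! @brief Computes the triple matrix product Ac = op(R) * op(A) * op(P).

    op(X) denotes either X or its transpose, as selected by the corresponding
    transpose flag.

    @param R left (e.g. restriction) matrix
    @param transposeR if true, use the transpose of R
    @param A middle matrix
    @param transposeA if true, use the transpose of A
    @param P right (e.g. prolongation) matrix
    @param transposeP if true, use the transpose of P
    @param Ac result matrix; its row map must match the row map of R (or the domain map of R if transposeR is true)
    @param call_FillComplete_on_result if true (default), fillComplete() is called on Ac before returning
    @param doOptimizeStorage if true (default), storage optimization is requested when Ac is fill-completed
    @param label string label forwarded to the underlying kernel (e.g. for timer names)
    @param params parameter list forwarded to the underlying kernel
  */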
  static void MultiplyRAP(const Matrix& R, bool transposeR,
                          const Matrix& A, bool transposeA,
                          const Matrix& P, bool transposeP,
                          Matrix& Ac,
                          bool call_FillComplete_on_result = true,
                          bool doOptimizeStorage = true,
                          const std::string& label = std::string(),
                          const RCP<ParameterList>& params = null);

};  // class TripleMatrixMultiply
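
// Illustrative usage (not part of the original header): forming a coarse-grid
// operator Ac = R * A * P with the primary template. R, A, and P are assumed to
// be fill-completed Xpetra::Matrix objects built elsewhere; the result matrix is
// constructed on R's row map, which MultiplyRAP requires when transposeR is false.
//
//   RCP<Matrix> Ac = MatrixFactory::Build(R->getRowMap(), 0);
//   Xpetra::TripleMatrixMultiply<Scalar, LocalOrdinal, GlobalOrdinal, Node>::
//       MultiplyRAP(*R, false, *A, false, *P, false, *Ac,
//                   true /*call_FillComplete_on_result*/, true /*doOptimizeStorage*/);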

#ifdef HAVE_XPETRA_EPETRA
// specialization TripleMatrixMultiply for SC=double, LO=GO=int
template <>
class TripleMatrixMultiply<double, int, int, EpetraNode> {
  typedef double Scalar;
  typedef int LocalOrdinal;
  typedef int GlobalOrdinal;
  typedef EpetraNode Node;
#include "Xpetra_UseShortNames.hpp"

 public:
  static void MultiplyRAP(const Matrix& R, bool transposeR,
                          const Matrix& A, bool transposeA,
                          const Matrix& P, bool transposeP,
                          Matrix& Ac,
                          bool call_FillComplete_on_result = true,
                          bool doOptimizeStorage = true,
                          const std::string& label = std::string(),
                          const RCP<ParameterList>& params = null) {
    TEUCHOS_TEST_FOR_EXCEPTION(transposeR == false && Ac.getRowMap()->isSameAs(*R.getRowMap()) == false,
                               Exceptions::RuntimeError, "XpetraExt::TripleMatrixMultiply::MultiplyRAP: row map of Ac is not same as row map of R");
    TEUCHOS_TEST_FOR_EXCEPTION(transposeR == true && Ac.getRowMap()->isSameAs(*R.getDomainMap()) == false,
                               Exceptions::RuntimeError, "XpetraExt::TripleMatrixMultiply::MultiplyRAP: row map of Ac is not same as domain map of R");

    TEUCHOS_TEST_FOR_EXCEPTION(!R.isFillComplete(), Exceptions::RuntimeError, "R is not fill-completed");
    TEUCHOS_TEST_FOR_EXCEPTION(!A.isFillComplete(), Exceptions::RuntimeError, "A is not fill-completed");
    TEUCHOS_TEST_FOR_EXCEPTION(!P.isFillComplete(), Exceptions::RuntimeError, "P is not fill-completed");

    bool haveMultiplyDoFillComplete = call_FillComplete_on_result && doOptimizeStorage;

    if (Ac.getRowMap()->lib() == Xpetra::UseEpetra) {
      throw(Xpetra::Exceptions::RuntimeError("Xpetra::TripleMatrixMultiply::MultiplyRAP is only implemented for Tpetra"));
    } else if (Ac.getRowMap()->lib() == Xpetra::UseTpetra) {
#ifdef HAVE_XPETRA_TPETRA
#if ((defined(EPETRA_HAVE_OMP) && (!defined(HAVE_TPETRA_INST_OPENMP) || !defined(HAVE_TPETRA_INST_INT_INT))) || \
     (!defined(EPETRA_HAVE_OMP) && (!defined(HAVE_TPETRA_INST_SERIAL) || !defined(HAVE_TPETRA_INST_INT_INT))))
      throw(Xpetra::Exceptions::RuntimeError("Xpetra must be compiled with Tpetra <double,int,int> ETI enabled."));
#else
      using helpers = Xpetra::Helpers<SC, LO, GO, NO>;
      if (helpers::isTpetraCrs(R) && helpers::isTpetraCrs(A) && helpers::isTpetraCrs(P) && helpers::isTpetraCrs(Ac)) {
        // All matrices are Crs
        const Tpetra::CrsMatrix<SC, LO, GO, NO>& tpR = Xpetra::Helpers<SC, LO, GO, NO>::Op2TpetraCrs(R);
        const Tpetra::CrsMatrix<SC, LO, GO, NO>& tpA = Xpetra::Helpers<SC, LO, GO, NO>::Op2TpetraCrs(A);
        const Tpetra::CrsMatrix<SC, LO, GO, NO>& tpP = Xpetra::Helpers<SC, LO, GO, NO>::Op2TpetraCrs(P);
        Tpetra::CrsMatrix<SC, LO, GO, NO>& tpAc = Xpetra::Helpers<SC, LO, GO, NO>::Op2NonConstTpetraCrs(Ac);

        // 18Feb2013 JJH I'm reenabling the code that allows the matrix matrix multiply to do the fillComplete.
        // Previously, Tpetra's matrix matrix multiply did not support fillComplete.
        Tpetra::TripleMatrixMultiply::MultiplyRAP(tpR, transposeR, tpA, transposeA, tpP, transposeP, tpAc, haveMultiplyDoFillComplete, label, params);
      } else if (helpers::isTpetraBlockCrs(R) && helpers::isTpetraBlockCrs(A) && helpers::isTpetraBlockCrs(P)) {
        // All matrices are BlockCrs (except maybe Ac)
        // FIXME: For the moment we're just going to clobber the innards of Ac, so no reuse. Once we have a reuse kernel,
        // we'll need to think about refactoring BlockCrs so we can do something smarter here.
        if (!A.getRowMap()->getComm()->getRank())
          std::cout << "WARNING: Using inefficient BlockCrs Multiply Placeholder" << std::endl;

        const Tpetra::BlockCrsMatrix<SC, LO, GO, NO>& tpR = Xpetra::Helpers<SC, LO, GO, NO>::Op2TpetraBlockCrs(R);
        const Tpetra::BlockCrsMatrix<SC, LO, GO, NO>& tpA = Xpetra::Helpers<SC, LO, GO, NO>::Op2TpetraBlockCrs(A);
        const Tpetra::BlockCrsMatrix<SC, LO, GO, NO>& tpP = Xpetra::Helpers<SC, LO, GO, NO>::Op2TpetraBlockCrs(P);
        // Tpetra::BlockCrsMatrix<SC,LO,GO,NO> & tpAc = Xpetra::Helpers<SC,LO,GO,NO>::Op2NonConstTpetraBlockCrs(Ac);

        using CRS = Tpetra::CrsMatrix<SC, LO, GO, NO>;
        RCP<const CRS> Rcrs = Tpetra::convertToCrsMatrix(tpR);
        RCP<const CRS> Acrs = Tpetra::convertToCrsMatrix(tpA);
        RCP<const CRS> Pcrs = Tpetra::convertToCrsMatrix(tpP);
        // RCP<CRS> Accrs = Tpetra::convertToCrsMatrix(tpAc);

        // FIXME: The lines below only work because we're assuming Ac is Point
        RCP<CRS> Accrs = Teuchos::rcp(new CRS(Rcrs->getRowMap(), 0));
        const bool do_fill_complete = true;
        Tpetra::TripleMatrixMultiply::MultiplyRAP(*Rcrs, transposeR, *Acrs, transposeA, *Pcrs, transposeP, *Accrs, do_fill_complete, label, params);

        // Temporary output matrix
        RCP<Tpetra::BlockCrsMatrix<SC, LO, GO, NO> > Ac_t = Tpetra::convertToBlockCrsMatrix(*Accrs, A.GetStorageBlockSize());
        // Wrap the temporary BlockCrs result as an Xpetra::CrsMatrix so it can be swapped into Ac below
        RCP<Xpetra::CrsMatrix<SC, LO, GO, NO> > Ac_p = Teuchos::rcp(new Xpetra::TpetraBlockCrsMatrix<SC, LO, GO, NO>(Ac_t));

        // We can now cheat and replace the innards of Ac
        RCP<Xpetra::CrsMatrixWrap<SC, LO, GO, NO> > Ac_w = Teuchos::rcp_dynamic_cast<Xpetra::CrsMatrixWrap<SC, LO, GO, NO> >(Teuchos::rcpFromRef(Ac));
        Ac_w->replaceCrsMatrix(Ac_p);

      } else {
        // Mix and match (not supported)
        TEUCHOS_TEST_FOR_EXCEPTION(1, Exceptions::RuntimeError, "Mix-and-match Crs/BlockCrs Multiply not currently supported");
      }
#endif
#else
      throw(Xpetra::Exceptions::RuntimeError("Xpetra must be compiled with Tpetra."));
#endif
      if (call_FillComplete_on_result && !haveMultiplyDoFillComplete) {
        RCP<Teuchos::ParameterList> fillParams = Teuchos::rcp(new Teuchos::ParameterList());
        fillParams->set("Optimize Storage", doOptimizeStorage);
        Ac.fillComplete((transposeP) ? P.getRangeMap() : P.getDomainMap(),
                        (transposeR) ? R.getDomainMap() : R.getRangeMap(),
                        fillParams);
      }

      // transfer striding information
      RCP<const Map> domainMap = Teuchos::null;
      RCP<const Map> rangeMap  = Teuchos::null;

      const std::string stridedViewLabel("stridedMaps");
      const size_t blkSize = 1;
      std::vector<size_t> stridingInfo(1, blkSize);
      LocalOrdinal stridedBlockId = -1;

      if (R.IsView(stridedViewLabel)) {
        rangeMap = transposeR ? R.getColMap(stridedViewLabel) : R.getRowMap(stridedViewLabel);
      } else {
        rangeMap = transposeR ? R.getDomainMap() : R.getRangeMap();
        rangeMap = StridedMapFactory::Build(rangeMap, stridingInfo, stridedBlockId);
      }

      if (P.IsView(stridedViewLabel)) {
        domainMap = transposeP ? P.getRowMap(stridedViewLabel) : P.getColMap(stridedViewLabel);
      } else {
        domainMap = transposeP ? P.getRangeMap() : P.getDomainMap();
        domainMap = StridedMapFactory::Build(domainMap, stridingInfo, stridedBlockId);
      }
      Ac.CreateView(stridedViewLabel, rangeMap, domainMap);
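      // Illustrative note: callers (e.g. multigrid setup code) can recover the
      // strided maps attached above by querying the named view on Ac, for example:
      //
      //   RCP<const Map> stridedRangeMap  = Ac.getRowMap(stridedViewLabel);
      //   RCP<const Map> stridedDomainMap = Ac.getColMap(stridedViewLabel);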
    }

  }  // end MultiplyRAP

};  // end specialization on SC=double, GO=int and NO=EpetraNode

// specialization TripleMatrixMultiply for SC=double, LO=int, GO=long long and NO=EpetraNode
template <>
class TripleMatrixMultiply<double, int, long long, EpetraNode> {
  typedef double Scalar;
  typedef int LocalOrdinal;
  typedef long long GlobalOrdinal;
  typedef EpetraNode Node;
#include "Xpetra_UseShortNames.hpp"

 public:
  static void MultiplyRAP(const Matrix& R, bool transposeR,
                          const Matrix& A, bool transposeA,
                          const Matrix& P, bool transposeP,
                          Matrix& Ac,
                          bool call_FillComplete_on_result = true,
                          bool doOptimizeStorage = true,
                          const std::string& label = std::string(),
                          const RCP<ParameterList>& params = null) {
    TEUCHOS_TEST_FOR_EXCEPTION(transposeR == false && Ac.getRowMap()->isSameAs(*R.getRowMap()) == false,
                               Exceptions::RuntimeError, "XpetraExt::TripleMatrixMultiply::MultiplyRAP: row map of Ac is not same as row map of R");
    TEUCHOS_TEST_FOR_EXCEPTION(transposeR == true && Ac.getRowMap()->isSameAs(*R.getDomainMap()) == false,
                               Exceptions::RuntimeError, "XpetraExt::TripleMatrixMultiply::MultiplyRAP: row map of Ac is not same as domain map of R");

    TEUCHOS_TEST_FOR_EXCEPTION(!R.isFillComplete(), Exceptions::RuntimeError, "R is not fill-completed");
    TEUCHOS_TEST_FOR_EXCEPTION(!A.isFillComplete(), Exceptions::RuntimeError, "A is not fill-completed");
    TEUCHOS_TEST_FOR_EXCEPTION(!P.isFillComplete(), Exceptions::RuntimeError, "P is not fill-completed");

    bool haveMultiplyDoFillComplete = call_FillComplete_on_result && doOptimizeStorage;

    if (Ac.getRowMap()->lib() == Xpetra::UseEpetra) {
      throw(Xpetra::Exceptions::RuntimeError("Xpetra::TripleMatrixMultiply::MultiplyRAP is only implemented for Tpetra"));
    } else if (Ac.getRowMap()->lib() == Xpetra::UseTpetra) {
#ifdef HAVE_XPETRA_TPETRA
#if ((defined(EPETRA_HAVE_OMP) && (!defined(HAVE_TPETRA_INST_OPENMP) || !defined(HAVE_TPETRA_INST_INT_LONG_LONG))) || \
     (!defined(EPETRA_HAVE_OMP) && (!defined(HAVE_TPETRA_INST_SERIAL) || !defined(HAVE_TPETRA_INST_INT_LONG_LONG))))
      throw(Xpetra::Exceptions::RuntimeError("Xpetra must be compiled with Tpetra <double,int,long long,EpetraNode> ETI enabled."));
#else
      using helpers = Xpetra::Helpers<SC, LO, GO, NO>;
      if (helpers::isTpetraCrs(R) && helpers::isTpetraCrs(A) && helpers::isTpetraCrs(P)) {
        // All matrices are Crs
        const Tpetra::CrsMatrix<SC, LO, GO, NO>& tpR = Xpetra::Helpers<SC, LO, GO, NO>::Op2TpetraCrs(R);
        const Tpetra::CrsMatrix<SC, LO, GO, NO>& tpA = Xpetra::Helpers<SC, LO, GO, NO>::Op2TpetraCrs(A);
        const Tpetra::CrsMatrix<SC, LO, GO, NO>& tpP = Xpetra::Helpers<SC, LO, GO, NO>::Op2TpetraCrs(P);
        Tpetra::CrsMatrix<SC, LO, GO, NO>& tpAc = Xpetra::Helpers<SC, LO, GO, NO>::Op2NonConstTpetraCrs(Ac);

        // 18Feb2013 JJH I'm reenabling the code that allows the matrix matrix multiply to do the fillComplete.
        // Previously, Tpetra's matrix matrix multiply did not support fillComplete.
        Tpetra::TripleMatrixMultiply::MultiplyRAP(tpR, transposeR, tpA, transposeA, tpP, transposeP, tpAc, haveMultiplyDoFillComplete, label, params);
      } else if (helpers::isTpetraBlockCrs(R) && helpers::isTpetraBlockCrs(A) && helpers::isTpetraBlockCrs(P)) {
        // All matrices are BlockCrs (except maybe Ac)
        // FIXME: For the moment we're just going to clobber the innards of Ac, so no reuse. Once we have a reuse kernel,
        // we'll need to think about refactoring BlockCrs so we can do something smarter here.
        if (!A.getRowMap()->getComm()->getRank())
          std::cout << "WARNING: Using inefficient BlockCrs Multiply Placeholder" << std::endl;

        const Tpetra::BlockCrsMatrix<SC, LO, GO, NO>& tpR = Xpetra::Helpers<SC, LO, GO, NO>::Op2TpetraBlockCrs(R);
        const Tpetra::BlockCrsMatrix<SC, LO, GO, NO>& tpA = Xpetra::Helpers<SC, LO, GO, NO>::Op2TpetraBlockCrs(A);
        const Tpetra::BlockCrsMatrix<SC, LO, GO, NO>& tpP = Xpetra::Helpers<SC, LO, GO, NO>::Op2TpetraBlockCrs(P);
        // Tpetra::BlockCrsMatrix<SC,LO,GO,NO> & tpAc = Xpetra::Helpers<SC,LO,GO,NO>::Op2NonConstTpetraBlockCrs(Ac);

        using CRS = Tpetra::CrsMatrix<SC, LO, GO, NO>;
        RCP<const CRS> Rcrs = Tpetra::convertToCrsMatrix(tpR);
        RCP<const CRS> Acrs = Tpetra::convertToCrsMatrix(tpA);
        RCP<const CRS> Pcrs = Tpetra::convertToCrsMatrix(tpP);
        // RCP<CRS> Accrs = Tpetra::convertToCrsMatrix(tpAc);

        // FIXME: The lines below only work because we're assuming Ac is Point
        RCP<CRS> Accrs = Teuchos::rcp(new CRS(Rcrs->getRowMap(), 0));
        const bool do_fill_complete = true;
        Tpetra::TripleMatrixMultiply::MultiplyRAP(*Rcrs, transposeR, *Acrs, transposeA, *Pcrs, transposeP, *Accrs, do_fill_complete, label, params);

        // Temporary output matrix
        RCP<Tpetra::BlockCrsMatrix<SC, LO, GO, NO> > Ac_t = Tpetra::convertToBlockCrsMatrix(*Accrs, A.GetStorageBlockSize());
        // Wrap the temporary BlockCrs result as an Xpetra::CrsMatrix so it can be swapped into Ac below
        RCP<Xpetra::CrsMatrix<SC, LO, GO, NO> > Ac_p = Teuchos::rcp(new Xpetra::TpetraBlockCrsMatrix<SC, LO, GO, NO>(Ac_t));

        // We can now cheat and replace the innards of Ac
        RCP<Xpetra::CrsMatrixWrap<SC, LO, GO, NO> > Ac_w = Teuchos::rcp_dynamic_cast<Xpetra::CrsMatrixWrap<SC, LO, GO, NO> >(Teuchos::rcpFromRef(Ac));
        Ac_w->replaceCrsMatrix(Ac_p);
      } else {
        // Mix and match (not supported)
        TEUCHOS_TEST_FOR_EXCEPTION(1, Exceptions::RuntimeError, "Mix-and-match Crs/BlockCrs Multiply not currently supported");
      }

#endif
#else
      throw(Xpetra::Exceptions::RuntimeError("Xpetra must be compiled with Tpetra."));
#endif
      if (call_FillComplete_on_result && !haveMultiplyDoFillComplete) {
        RCP<Teuchos::ParameterList> fillParams = Teuchos::rcp(new Teuchos::ParameterList());
        fillParams->set("Optimize Storage", doOptimizeStorage);
        Ac.fillComplete((transposeP) ? P.getRangeMap() : P.getDomainMap(),
                        (transposeR) ? R.getDomainMap() : R.getRangeMap(),
                        fillParams);
      }

      // transfer striding information
      RCP<const Map> domainMap = Teuchos::null;
      RCP<const Map> rangeMap  = Teuchos::null;

      const std::string stridedViewLabel("stridedMaps");
      const size_t blkSize = 1;
      std::vector<size_t> stridingInfo(1, blkSize);
      LocalOrdinal stridedBlockId = -1;

      if (R.IsView(stridedViewLabel)) {
        rangeMap = transposeR ? R.getColMap(stridedViewLabel) : R.getRowMap(stridedViewLabel);
      } else {
        rangeMap = transposeR ? R.getDomainMap() : R.getRangeMap();
        rangeMap = StridedMapFactory::Build(rangeMap, stridingInfo, stridedBlockId);
      }

      if (P.IsView(stridedViewLabel)) {
        domainMap = transposeP ? P.getRowMap(stridedViewLabel) : P.getColMap(stridedViewLabel);
      } else {
        domainMap = transposeP ? P.getRangeMap() : P.getDomainMap();
        domainMap = StridedMapFactory::Build(domainMap, stridingInfo, stridedBlockId);
      }
      Ac.CreateView(stridedViewLabel, rangeMap, domainMap);
    }

  }  // end MultiplyRAP

};  // end specialization on SC=double, LO=int, GO=long long and NO=EpetraNode
#endif  // HAVE_XPETRA_EPETRA

}  // end namespace Xpetra

#define XPETRA_TRIPLEMATRIXMULTIPLY_SHORT

#endif /* PACKAGES_XPETRA_SUP_UTILS_XPETRA_TRIPLEMATRIXMULTIPLY_DECL_HPP_ */