Tpetra parallel linear algebra Version of the Day
Tpetra_RowMatrix_def.hpp
// @HEADER
// *****************************************************************************
// Tpetra: Templated Linear Algebra Services Package
//
// Copyright 2008 NTESS and the Tpetra contributors.
// SPDX-License-Identifier: BSD-3-Clause
// *****************************************************************************
// @HEADER

#ifndef TPETRA_ROWMATRIX_DEF_HPP
#define TPETRA_ROWMATRIX_DEF_HPP

#include "Tpetra_CrsMatrix.hpp"
#include "Tpetra_Map.hpp"
#include "Tpetra_RowGraph.hpp"

namespace Tpetra {

template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
RowMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>::~RowMatrix() {}

template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
Teuchos::RCP<RowMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node> >
RowMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>::
    add(const Scalar& alpha,
        const RowMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>& A,
        const Scalar& beta,
        const Teuchos::RCP<const Map<LocalOrdinal, GlobalOrdinal, Node> >& domainMap,
        const Teuchos::RCP<const Map<LocalOrdinal, GlobalOrdinal, Node> >& rangeMap,
        const Teuchos::RCP<Teuchos::ParameterList>& params) const {
  using Teuchos::Array;
  using Teuchos::ArrayView;
  using Teuchos::ParameterList;
  using Teuchos::RCP;
  using Teuchos::rcp;
  using Teuchos::rcp_implicit_cast;
  using Teuchos::sublist;
  typedef LocalOrdinal LO;
  typedef GlobalOrdinal GO;
  typedef Teuchos::ScalarTraits<Scalar> STS;
  typedef Map<LO, GO, Node> map_type;
  typedef RowMatrix<Scalar, LO, GO, Node> this_type;
  typedef CrsMatrix<Scalar, LO, GO, Node> crs_matrix_type;

  const this_type& B = *this;  // a convenient abbreviation

  // If the user didn't supply a domain or range Map, then try to
  // get one from B first (if it has them), then from A (if it has
  // them).  If we don't have any domain or range Maps, scold the
  // user.
  RCP<const map_type> A_domainMap = A.getDomainMap();
  RCP<const map_type> A_rangeMap = A.getRangeMap();
  RCP<const map_type> B_domainMap = B.getDomainMap();
  RCP<const map_type> B_rangeMap = B.getRangeMap();

  RCP<const map_type> theDomainMap = domainMap;
  RCP<const map_type> theRangeMap = rangeMap;

  if (domainMap.is_null()) {
    if (B_domainMap.is_null()) {
      TEUCHOS_TEST_FOR_EXCEPTION(
          A_domainMap.is_null(), std::invalid_argument,
          "Tpetra::RowMatrix::add: If neither A nor B have a domain Map, "
          "then you must supply a nonnull domain Map to this method.");
      theDomainMap = A_domainMap;
    } else {
      theDomainMap = B_domainMap;
    }
  }
  if (rangeMap.is_null()) {
    if (B_rangeMap.is_null()) {
      TEUCHOS_TEST_FOR_EXCEPTION(
          A_rangeMap.is_null(), std::invalid_argument,
          "Tpetra::RowMatrix::add: If neither A nor B have a range Map, "
          "then you must supply a nonnull range Map to this method.");
      theRangeMap = A_rangeMap;
    } else {
      theRangeMap = B_rangeMap;
    }
  }

#ifdef HAVE_TPETRA_DEBUG
  // In a debug build, check that A and B have matching domain and
  // range Maps, if they have domain and range Maps at all.  (If
  // they aren't fill complete, then they may not yet have them.)
  if (!A_domainMap.is_null() && !A_rangeMap.is_null()) {
    if (!B_domainMap.is_null() && !B_rangeMap.is_null()) {
      TEUCHOS_TEST_FOR_EXCEPTION(
          !B_domainMap->isSameAs(*A_domainMap), std::invalid_argument,
          "Tpetra::RowMatrix::add: The input RowMatrix A must have a domain Map "
          "which is the same as (isSameAs) this RowMatrix's domain Map.");
      TEUCHOS_TEST_FOR_EXCEPTION(
          !B_rangeMap->isSameAs(*A_rangeMap), std::invalid_argument,
          "Tpetra::RowMatrix::add: The input RowMatrix A must have a range Map "
          "which is the same as (isSameAs) this RowMatrix's range Map.");
      TEUCHOS_TEST_FOR_EXCEPTION(
          !domainMap.is_null() && !domainMap->isSameAs(*B_domainMap),
          std::invalid_argument,
          "Tpetra::RowMatrix::add: The input domain Map must be the same as "
          "(isSameAs) this RowMatrix's domain Map.");
      TEUCHOS_TEST_FOR_EXCEPTION(
          !rangeMap.is_null() && !rangeMap->isSameAs(*B_rangeMap),
          std::invalid_argument,
          "Tpetra::RowMatrix::add: The input range Map must be the same as "
          "(isSameAs) this RowMatrix's range Map.");
    }
  } else if (!B_domainMap.is_null() && !B_rangeMap.is_null()) {
    TEUCHOS_TEST_FOR_EXCEPTION(
        !domainMap.is_null() && !domainMap->isSameAs(*B_domainMap),
        std::invalid_argument,
        "Tpetra::RowMatrix::add: The input domain Map must be the same as "
        "(isSameAs) this RowMatrix's domain Map.");
    TEUCHOS_TEST_FOR_EXCEPTION(
        !rangeMap.is_null() && !rangeMap->isSameAs(*B_rangeMap),
        std::invalid_argument,
        "Tpetra::RowMatrix::add: The input range Map must be the same as "
        "(isSameAs) this RowMatrix's range Map.");
  } else {
    TEUCHOS_TEST_FOR_EXCEPTION(
        domainMap.is_null() || rangeMap.is_null(), std::invalid_argument,
        "Tpetra::RowMatrix::add: If neither A nor B have a domain and range "
        "Map, then you must supply a nonnull domain and range Map to this "
        "method.");
  }
#endif  // HAVE_TPETRA_DEBUG

  // What parameters do we pass to C's constructor?  Do we call
  // fillComplete on C after filling it?  And if so, what parameters
  // do we pass to C's fillComplete call?
  bool callFillComplete = true;
  RCP<ParameterList> constructorSublist;
  RCP<ParameterList> fillCompleteSublist;
  if (!params.is_null()) {
    callFillComplete = params->get("Call fillComplete", callFillComplete);
    constructorSublist = sublist(params, "Constructor parameters");
    fillCompleteSublist = sublist(params, "fillComplete parameters");
  }
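  // For illustration only (a hypothetical caller-side sketch, not code
  // that runs here): the params argument consulted above could be built as
  //
  //   RCP<ParameterList> p = Teuchos::parameterList();
  //   p->set("Call fillComplete", true);
  //   p->sublist("Constructor parameters");   // forwarded to C's constructor
  //   p->sublist("fillComplete parameters");  // forwarded to C->fillComplete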

  RCP<const map_type> A_rowMap = A.getRowMap();
  RCP<const map_type> B_rowMap = B.getRowMap();
  RCP<const map_type> C_rowMap = B_rowMap;  // see discussion in documentation
  RCP<crs_matrix_type> C;                   // The result matrix.

  // If A and B's row Maps are the same, we can compute an upper
  // bound on the number of entries in each row of C, before
  // actually computing the sum.  A reasonable upper bound is the
  // sum of the two entry counts in each row.  If we choose this as
  // the actual per-row upper bound, we can use static profile.
  if (A_rowMap->isSameAs(*B_rowMap)) {
    const LO localNumRows = static_cast<LO>(A_rowMap->getLocalNumElements());
    Array<size_t> C_maxNumEntriesPerRow(localNumRows, 0);

    // Get the number of entries in each row of A.
    if (alpha != STS::zero()) {
      for (LO localRow = 0; localRow < localNumRows; ++localRow) {
        const size_t A_numEntries = A.getNumEntriesInLocalRow(localRow);
        C_maxNumEntriesPerRow[localRow] += A_numEntries;
      }
    }
    // Get the number of entries in each row of B.
    if (beta != STS::zero()) {
      for (LO localRow = 0; localRow < localNumRows; ++localRow) {
        const size_t B_numEntries = B.getNumEntriesInLocalRow(localRow);
        C_maxNumEntriesPerRow[localRow] += B_numEntries;
      }
    }
    // Construct the result matrix C.
    if (constructorSublist.is_null()) {
      C = rcp(new crs_matrix_type(C_rowMap, C_maxNumEntriesPerRow()));
    } else {
      C = rcp(new crs_matrix_type(C_rowMap, C_maxNumEntriesPerRow(),
                                  constructorSublist));
    }
    // Since A and B have the same row Maps, we could add them
    // together all at once and merge values before we call
    // insertGlobalValues.  However, we don't really need to, since
    // we've already allocated enough space in each row of C for C
    // to do the merge itself.
  } else {  // the row Maps of A and B are not the same
    // Construct the result matrix C.
    // true: !A_rowMap->isSameAs (*B_rowMap)
    TEUCHOS_TEST_FOR_EXCEPTION(true,
                               std::invalid_argument,
                               "Tpetra::RowMatrix::add: The row maps must be the same for statically "
                               "allocated matrices in order to be sure that there is sufficient space "
                               "to do the addition");
  }

#ifdef HAVE_TPETRA_DEBUG
  TEUCHOS_TEST_FOR_EXCEPTION(C.is_null(), std::logic_error,
                             "Tpetra::RowMatrix::add: C should not be null at this point.  "
                             "Please report this bug to the Tpetra developers.");
#endif  // HAVE_TPETRA_DEBUG
  //
  // Compute C = alpha*A + beta*B.
  //
  using gids_type = nonconst_global_inds_host_view_type;
  using vals_type = nonconst_values_host_view_type;
  gids_type ind;
  vals_type val;

  if (alpha != STS::zero()) {
    const LO A_localNumRows = static_cast<LO>(A_rowMap->getLocalNumElements());
    for (LO localRow = 0; localRow < A_localNumRows; ++localRow) {
      size_t A_numEntries = A.getNumEntriesInLocalRow(localRow);
      const GO globalRow = A_rowMap->getGlobalElement(localRow);
      if (A_numEntries > static_cast<size_t>(ind.size())) {
        Kokkos::resize(ind, A_numEntries);
        Kokkos::resize(val, A_numEntries);
      }
      gids_type indView = Kokkos::subview(ind, std::make_pair((size_t)0, A_numEntries));
      vals_type valView = Kokkos::subview(val, std::make_pair((size_t)0, A_numEntries));
      A.getGlobalRowCopy(globalRow, indView, valView, A_numEntries);

      if (alpha != STS::one()) {
        for (size_t k = 0; k < A_numEntries; ++k) {
          valView[k] *= alpha;
        }
      }
      C->insertGlobalValues(globalRow, A_numEntries,
                            reinterpret_cast<const Scalar*>(valView.data()),
                            indView.data());
    }
  }

  if (beta != STS::zero()) {
    const LO B_localNumRows = static_cast<LO>(B_rowMap->getLocalNumElements());
    for (LO localRow = 0; localRow < B_localNumRows; ++localRow) {
      size_t B_numEntries = B.getNumEntriesInLocalRow(localRow);
      const GO globalRow = B_rowMap->getGlobalElement(localRow);
      if (B_numEntries > static_cast<size_t>(ind.size())) {
        Kokkos::resize(ind, B_numEntries);
        Kokkos::resize(val, B_numEntries);
      }
      gids_type indView = Kokkos::subview(ind, std::make_pair((size_t)0, B_numEntries));
      vals_type valView = Kokkos::subview(val, std::make_pair((size_t)0, B_numEntries));
      B.getGlobalRowCopy(globalRow, indView, valView, B_numEntries);

      if (beta != STS::one()) {
        for (size_t k = 0; k < B_numEntries; ++k) {
          valView[k] *= beta;
        }
      }
      C->insertGlobalValues(globalRow, B_numEntries,
                            reinterpret_cast<const Scalar*>(valView.data()),
                            indView.data());
    }
  }

  if (callFillComplete) {
    if (fillCompleteSublist.is_null()) {
      C->fillComplete(theDomainMap, theRangeMap);
    } else {
      C->fillComplete(theDomainMap, theRangeMap, fillCompleteSublist);
    }
  }

  return rcp_implicit_cast<this_type>(C);
}

template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
void RowMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>::
    pack(const Teuchos::ArrayView<const LocalOrdinal>& exportLIDs,
         Teuchos::Array<char>& exports,
         const Teuchos::ArrayView<size_t>& numPacketsPerLID,
         size_t& constantNumPackets) const {
#ifdef HAVE_TPETRA_DEBUG
  const char tfecfFuncName[] = "pack: ";
  {
    using Teuchos::reduceAll;
    std::ostringstream msg;
    int lclBad = 0;
    try {
      this->packImpl(exportLIDs, exports, numPacketsPerLID,
                     constantNumPackets);
    } catch (std::exception& e) {
      lclBad = 1;
      msg << e.what();
    }
    int gblBad = 0;
    const Teuchos::Comm<int>& comm = *(this->getComm());
    reduceAll<int, int>(comm, Teuchos::REDUCE_MAX,
                        lclBad, Teuchos::outArg(gblBad));
    if (gblBad != 0) {
      const int myRank = comm.getRank();
      const int numProcs = comm.getSize();
      for (int r = 0; r < numProcs; ++r) {
        if (r == myRank && lclBad != 0) {
          std::ostringstream os;
          os << "Proc " << myRank << ": " << msg.str() << std::endl;
          std::cerr << os.str();
        }
        comm.barrier();
        comm.barrier();
        comm.barrier();
      }
      TEUCHOS_TEST_FOR_EXCEPTION_CLASS_FUNC(
          true, std::logic_error,
          "packImpl() threw an exception on one or "
          "more participating processes.");
    }
  }
#else
  this->packImpl(exportLIDs, exports, numPacketsPerLID,
                 constantNumPackets);
#endif  // HAVE_TPETRA_DEBUG
}

template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
void RowMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>::
    allocatePackSpace(Teuchos::Array<char>& exports,
                      size_t& totalNumEntries,
                      const Teuchos::ArrayView<const LocalOrdinal>& exportLIDs) const {
  typedef LocalOrdinal LO;
  typedef GlobalOrdinal GO;
  typedef typename Teuchos::ArrayView<const LO>::size_type size_type;
  // const char tfecfFuncName[] = "allocatePackSpace: ";
  const size_type numExportLIDs = exportLIDs.size();

  // Count the total number of entries to send.
  totalNumEntries = 0;
  for (size_type i = 0; i < numExportLIDs; ++i) {
    const LO lclRow = exportLIDs[i];
    size_t curNumEntries = this->getNumEntriesInLocalRow(lclRow);
    // FIXME (mfh 25 Jan 2015) We should actually report invalid row
    // indices as an error.  Just consider them nonowned for now.
    if (curNumEntries == Teuchos::OrdinalTraits<size_t>::invalid()) {
      curNumEntries = 0;
    }
    totalNumEntries += curNumEntries;
  }

  // FIXME (mfh 24 Feb 2013) This code is only correct if
  // sizeof(Scalar) is a meaningful representation of the amount of
  // data in a Scalar instance.  (LO and GO are always built-in
  // integer types.)
  //
  // Allocate the exports array.  It does NOT need padding for
  // alignment, since we use memcpy to write to / read from send /
  // receive buffers.
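  //
  // Each nonempty packed row occupies sizeof(LO) bytes for the entry
  // count, followed by numEnt * sizeof(Scalar) bytes of values and
  // numEnt * sizeof(GO) bytes of global column indices; packImpl() and
  // packRow() below write the buffer in exactly that order.  (The count
  // slots reserved here for empty rows go unused, so allocSize is an
  // upper bound.)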
  const size_t allocSize =
      static_cast<size_t>(numExportLIDs) * sizeof(LO) +
      totalNumEntries * (sizeof(Scalar) + sizeof(GO));
  if (static_cast<size_t>(exports.size()) < allocSize) {
    exports.resize(allocSize);
  }
}

template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
bool RowMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>::
    packRow(char* const numEntOut,
            char* const valOut,
            char* const indOut,
            const size_t numEnt,
            const LocalOrdinal lclRow) const {
  using Teuchos::Array;
  using Teuchos::ArrayView;
  typedef LocalOrdinal LO;
  typedef GlobalOrdinal GO;
  typedef Tpetra::Map<LO, GO, Node> map_type;

  const LO numEntLO = static_cast<LO>(numEnt);
  memcpy(numEntOut, &numEntLO, sizeof(LO));

  if (this->supportsRowViews()) {
    if (this->isLocallyIndexed()) {
      // If the matrix is locally indexed on the calling process, we
      // have to use its column Map (which it _must_ have in this
      // case) to convert to global indices.
      local_inds_host_view_type indIn;
      values_host_view_type valIn;
      this->getLocalRowView(lclRow, indIn, valIn);
      const map_type& colMap = *(this->getColMap());
      // Copy column indices one at a time, so that we don't need
      // temporary storage.
      for (size_t k = 0; k < numEnt; ++k) {
        const GO gblIndIn = colMap.getGlobalElement(indIn[k]);
        memcpy(indOut + k * sizeof(GO), &gblIndIn, sizeof(GO));
      }
      memcpy(valOut, valIn.data(), numEnt * sizeof(Scalar));
    } else if (this->isGloballyIndexed()) {
      // If the matrix is globally indexed on the calling process,
      // then we can use the column indices directly.  However, we
      // have to get the global row index.  The calling process must
      // have a row Map, since otherwise it shouldn't be participating
      // in packing operations.
      global_inds_host_view_type indIn;
      values_host_view_type valIn;
      const map_type& rowMap = *(this->getRowMap());
      const GO gblRow = rowMap.getGlobalElement(lclRow);
      this->getGlobalRowView(gblRow, indIn, valIn);
      memcpy(indOut, indIn.data(), numEnt * sizeof(GO));
      memcpy(valOut, valIn.data(), numEnt * sizeof(Scalar));
    } else {
      if (numEnt != 0) {
        return false;
      }
    }
  } else {
    // FIXME (mfh 25 Jan 2015) Pass in valIn and indIn as scratch
    // space, instead of allocating them on each call.
    if (this->isLocallyIndexed()) {
      nonconst_local_inds_host_view_type indIn("indIn", numEnt);
      nonconst_values_host_view_type valIn("valIn", numEnt);
      size_t theNumEnt = 0;
      this->getLocalRowCopy(lclRow, indIn, valIn, theNumEnt);
      if (theNumEnt != numEnt) {
        return false;
      }
      const map_type& colMap = *(this->getColMap());
      // Copy column indices one at a time, so that we don't need
      // temporary storage.
      for (size_t k = 0; k < numEnt; ++k) {
        const GO gblIndIn = colMap.getGlobalElement(indIn[k]);
        memcpy(indOut + k * sizeof(GO), &gblIndIn, sizeof(GO));
      }
      memcpy(valOut, valIn.data(), numEnt * sizeof(Scalar));
    } else if (this->isGloballyIndexed()) {
      nonconst_global_inds_host_view_type indIn("indIn", numEnt);
      nonconst_values_host_view_type valIn("valIn", numEnt);
      const map_type& rowMap = *(this->getRowMap());
      const GO gblRow = rowMap.getGlobalElement(lclRow);
      size_t theNumEnt = 0;
      this->getGlobalRowCopy(gblRow, indIn, valIn, theNumEnt);
      if (theNumEnt != numEnt) {
        return false;
      }
      memcpy(indOut, indIn.data(), numEnt * sizeof(GO));
      memcpy(valOut, valIn.data(), numEnt * sizeof(Scalar));
    } else {
      if (numEnt != 0) {
        return false;
      }
    }
  }
  return true;
}

template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
void RowMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node>::
    packImpl(const Teuchos::ArrayView<const LocalOrdinal>& exportLIDs,
             Teuchos::Array<char>& exports,
             const Teuchos::ArrayView<size_t>& numPacketsPerLID,
             size_t& constantNumPackets) const {
  using Teuchos::Array;
  using Teuchos::ArrayView;
  using Teuchos::as;
  using Teuchos::av_reinterpret_cast;
  using Teuchos::RCP;
  typedef LocalOrdinal LO;
  typedef GlobalOrdinal GO;
  typedef typename ArrayView<const LO>::size_type size_type;
  const char tfecfFuncName[] = "packImpl: ";

  const size_type numExportLIDs = exportLIDs.size();
  TEUCHOS_TEST_FOR_EXCEPTION_CLASS_FUNC(
      numExportLIDs != numPacketsPerLID.size(), std::invalid_argument,
      "exportLIDs.size() = " << numExportLIDs << " != numPacketsPerLID.size()"
      " = " << numPacketsPerLID.size() << ".");

  // Setting this to zero tells the caller to expect a possibly
  // different ("nonconstant") number of packets per local index
  // (i.e., a possibly different number of entries per row).
  constantNumPackets = 0;

  // The pack buffer 'exports' enters this method possibly
  // unallocated.  Do the first two parts of "Count, allocate, fill,
  // compute."
  size_t totalNumEntries = 0;
  allocatePackSpace(exports, totalNumEntries, exportLIDs);
  const size_t bufSize = static_cast<size_t>(exports.size());

  // Compute the number of "packets" (in this case, bytes) per
  // export LID (in this case, local index of the row to send), and
  // actually pack the data.
  //
  // FIXME (mfh 24 Feb 2013, 25 Jan 2015) This code is only correct
  // if sizeof(Scalar) is a meaningful representation of the amount
  // of data in a Scalar instance.  (LO and GO are always built-in
  // integer types.)

  // Variables for error reporting in the loop.
  size_type firstBadIndex = 0;  // only valid if outOfBounds == true.
  size_t firstBadOffset = 0;    // only valid if outOfBounds == true.
  size_t firstBadNumBytes = 0;  // only valid if outOfBounds == true.
  bool outOfBounds = false;
  bool packErr = false;

  char* const exportsRawPtr = exports.getRawPtr();
  size_t offset = 0;  // current index into 'exports' array.
  for (size_type i = 0; i < numExportLIDs; ++i) {
    const LO lclRow = exportLIDs[i];
    const size_t numEnt = this->getNumEntriesInLocalRow(lclRow);

    // Only pack this row if it has a nonzero number of entries.
    if (numEnt == 0) {
      numPacketsPerLID[i] = 0;
    } else {
      char* const numEntBeg = exportsRawPtr + offset;
      char* const numEntEnd = numEntBeg + sizeof(LO);
      char* const valBeg = numEntEnd;
      char* const valEnd = valBeg + numEnt * sizeof(Scalar);
      char* const indBeg = valEnd;
      const size_t numBytes = sizeof(LO) +
                              numEnt * (sizeof(Scalar) + sizeof(GO));
      if (offset > bufSize || offset + numBytes > bufSize) {
        firstBadIndex = i;
        firstBadOffset = offset;
        firstBadNumBytes = numBytes;
        outOfBounds = true;
        break;
      }
      packErr = !packRow(numEntBeg, valBeg, indBeg, numEnt, lclRow);
      if (packErr) {
        firstBadIndex = i;
        firstBadOffset = offset;
        firstBadNumBytes = numBytes;
        break;
      }
      // numPacketsPerLID[i] is the number of "packets" in the
      // current local row i.  Packet=char (really "byte") so use
      // the number of bytes of the packed data for that row.
      numPacketsPerLID[i] = numBytes;
      offset += numBytes;
    }
  }

  // The point of these exceptions is to stop computation if the
  // above checks found a bug.  If HAVE_TPETRA_DEBUG is defined,
  // Tpetra will do extra all-reduces to check for global
  // consistency of the error state.  Otherwise, throwing an
  // exception here might cause deadlock, but we accept that as
  // better than just continuing.
  TEUCHOS_TEST_FOR_EXCEPTION(
      outOfBounds, std::logic_error,
      "First invalid offset into 'exports' pack buffer at index i = "
      << firstBadIndex << ".  exportLIDs[i]: " << exportLIDs[firstBadIndex]
      << ", bufSize: " << bufSize << ", offset: " << firstBadOffset
      << ", numBytes: " << firstBadNumBytes << ".");
  TEUCHOS_TEST_FOR_EXCEPTION(
      packErr, std::logic_error,
      "First error in packRow() at index i = "
      << firstBadIndex << ".  exportLIDs[i]: " << exportLIDs[firstBadIndex]
      << ", bufSize: " << bufSize << ", offset: " << firstBadOffset
      << ", numBytes: " << firstBadNumBytes << ".");
}

}  // namespace Tpetra

//
// Explicit instantiation macro
//
// Must be expanded from within the Tpetra namespace!
//

#define TPETRA_ROWMATRIX_INSTANT(SCALAR, LO, GO, NODE) \
  template class RowMatrix<SCALAR, LO, GO, NODE>;

#endif  // TPETRA_ROWMATRIX_DEF_HPP
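
The add() method defined above is the part of this file a user is most likely to call directly. The following is a minimal usage sketch, not part of Tpetra itself: the helper name exampleAdd and the choice of coefficients are illustrative, and it assumes A and B are fill-complete Tpetra::CrsMatrix objects that share a row Map, as the implementation above requires. It also exercises the parameter-list keys that add() reads.

#include "Tpetra_CrsMatrix.hpp"
#include "Teuchos_ParameterList.hpp"
#include "Teuchos_ScalarTraits.hpp"

typedef Tpetra::CrsMatrix<> crs_matrix_type;  // default Scalar/LO/GO/Node
typedef Tpetra::RowMatrix<crs_matrix_type::scalar_type,
                          crs_matrix_type::local_ordinal_type,
                          crs_matrix_type::global_ordinal_type,
                          crs_matrix_type::node_type> row_matrix_type;
typedef crs_matrix_type::scalar_type scalar_type;

// Compute C = 1*B + 1*A.  B plays the role of *this in RowMatrix::add().
Teuchos::RCP<row_matrix_type>
exampleAdd(const crs_matrix_type& A, const crs_matrix_type& B) {
  const scalar_type one = Teuchos::ScalarTraits<scalar_type>::one();

  // Optional parameters; add() only reads the three keys shown in its body.
  Teuchos::RCP<Teuchos::ParameterList> params = Teuchos::parameterList();
  params->set("Call fillComplete", true);      // the default behavior
  params->sublist("Constructor parameters");   // optional, forwarded as described above
  params->sublist("fillComplete parameters");  // optional, forwarded as described above

  // The result uses B's row Map and is fill complete on return,
  // because "Call fillComplete" is true.
  return B.add(one, A, one, B.getDomainMap(), B.getRangeMap(), params);
}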