multivectorTest.cpp
// @HEADER
// *****************************************************************************
// Zoltan2: A package of combinatorial algorithms for scientific computing
//
// Copyright 2012 NTESS and the Zoltan2 contributors.
// SPDX-License-Identifier: BSD-3-Clause
// *****************************************************************************
// @HEADER

// Create a Tpetra::MultiVector, and time the following:
//   1. Build a multivector with contiguous global ids.
//   2. Migrate.
//   3. Divide the procs into two groups and build a new multivector
//      in each group with the non-contiguous global ids of the migrated data.
//   4. Migrate the new multivector in the subgroup.
//
// Repeat this using Epetra, and also Zoltan1.
//
// This mimics the recursive bisection algorithm in Zoltan2.
//
// Because global ordinals are "int" in this test, Zoltan1 must be
// configured with the cmake option ZOLTAN_ENABLE_UINT_IDS.
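//
// For example, with 4 ranks and 10 coordinates: step 1 gives the contiguous
// blocks {0,1,2} {3,4,5} {6,7} {8,9}; step 2 migrates to the round-robin
// layout {0,4,8} {1,5,9} {2,6} {3,7}; step 3 splits the ranks into {0,1} and
// {2,3}, each subgroup keeping its round-robin ids; step 4 redistributes
// within each subgroup so the ids increase with rank:
// {0,1,4} {5,8,9} and {2,3} {6,7}.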

#include <Teuchos_DefaultComm.hpp>
#include <Teuchos_TimeMonitor.hpp>
#include <Tpetra_MultiVector.hpp>
#include <Tpetra_Import.hpp>

#include <Epetra_Vector.h>
#include <Epetra_MpiComm.h>
#include <Epetra_Import.h>

#include <zoltan.h>

#include <Zoltan2_Util.hpp>

#include <string>
#include <sstream>
#include <iostream>

using Teuchos::RCP;
using Teuchos::rcp;
using Teuchos::rcp_dynamic_cast;
using Teuchos::ArrayRCP;
using Teuchos::ArrayView;
using Teuchos::TimeMonitor;
using Teuchos::Time;

#ifdef HAVE_MPI

typedef int TPETRA_LO;
typedef int TPETRA_GO;
typedef unsigned int ZOLTAN_LO;
typedef unsigned int ZOLTAN_GO;
#define MPI_ZOLTAN_GO_TYPE MPI_UNSIGNED
typedef double TPETRA_SCALAR;

RCP<Time> tmvBuild;
RCP<Time> tmvMigrate;
RCP<Time> tmvBuildN;
RCP<Time> tmvMigrateN;

RCP<Time> emvBuild;
RCP<Time> emvMigrate;
RCP<Time> emvBuildN;
RCP<Time> emvMigrateN;

RCP<Time> ztnBuild;
RCP<Time> ztnMigrate;
RCP<Time> ztnBuildN;
RCP<Time> ztnMigrateN;

using Teuchos::MpiComm;
using Teuchos::Comm;

enum testConstants {COORDDIM=3};

static void usage(char *arg[]){
  std::cout << "Usage:" << std::endl;
  std::cout << arg[0] << " {num coords}" << std::endl;
}

// Generate global Ids for different mappings

TPETRA_LO numSequentialGlobalIds(TPETRA_GO numGlobalCoords, int nprocs, int rank)
{
  TPETRA_LO share = numGlobalCoords / nprocs;
  TPETRA_LO extra = numGlobalCoords % nprocs;
  TPETRA_LO numLocalCoords = share;
  if (rank < extra)
    numLocalCoords++;

  return numLocalCoords;
}
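// Example (10 coordinates on 4 procs): share=2 and extra=2, so ranks 0-3
// own 3,3,2,2 coordinates, i.e. the contiguous blocks {0,1,2} {3,4,5}
// {6,7} {8,9} once the maps below are built.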
template <typename T>
void roundRobinGlobalIds(T numGlobalCoords, int nprocs, int rank,
  T *&gids)
{
  // Local number of coordinates does not change.
  T share = numGlobalCoords / nprocs;
  T extra = numGlobalCoords % nprocs;
  T numLocalCoords = share;
  if (static_cast<T> (rank) < extra)
    numLocalCoords++;

  gids = new T [numLocalCoords];
  if (!gids)
    throw std::bad_alloc();

  T next = 0;
  for (T i=rank; i < numGlobalCoords; i+=nprocs)
    gids[next++] = i;

  return;
}
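// Example (10 coordinates on 4 procs): rank 0 gets {0,4,8}, rank 1 gets
// {1,5,9}, rank 2 gets {2,6}, and rank 3 gets {3,7} -- each rank keeps the
// same local count as the contiguous map, but the ids are strided by nprocs.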
template <typename T>
void subGroupGloballyIncreasingIds(T numGlobalCoords,
  int nprocs, int rank, T *&gids)
{
  int numProcsLeftHalf = nprocs / 2;
  T share = numGlobalCoords / nprocs;
  T extra = numGlobalCoords % nprocs;
  T numCoordsLeftHalf = 0;
  T firstIdx = 0, endIdx = 0;

  T endP = ((numProcsLeftHalf > rank) ? numProcsLeftHalf : rank);

  for (T p=0; p < endP ; p++){
    T numLocalCoords = share;
    if (p < extra)
      numLocalCoords++;

    if (p < static_cast<T> (rank))
      firstIdx += numLocalCoords;

    if (p < static_cast<T> (numProcsLeftHalf))
      numCoordsLeftHalf += numLocalCoords;
  }

  endIdx = firstIdx + share;
  if (rank < int(extra))
    endIdx++;

  if (rank >= numProcsLeftHalf){
    firstIdx -= numCoordsLeftHalf;
    endIdx -= numCoordsLeftHalf;
  }

  int firstProc=0, endProc=0;

  if (rank < numProcsLeftHalf){
    firstProc = 0;
    endProc = numProcsLeftHalf;
  }
  else{
    firstProc = numProcsLeftHalf;
    endProc = nprocs;
  }

  int numProcsInMyHalf = endProc - firstProc;

  // Picture the round robin global ids as a matrix where the
  // columns are the processes and row values are global ids.
  // Row values start at 0 in the upper left corner and increase
  // to nprocs-1 in the upper right corner. The values in
  // the second row are nprocs greater than the first row,
  // and so on.
  //
  // The processes were divided into two halves, represented
  // by a vertical line through the matrix dividing the
  // processes in the left half from the processes in the
  // right half.
  //
  // Now we want to enumerate the global ids in my half
  // in increasing order.
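  //
  // Example (10 coordinates, 4 procs, so the halves are ranks {0,1} and
  // {2,3}): the left half owns round-robin ids {0,4,8} and {1,5,9}; walking
  // its columns of the matrix row by row yields 0,1,4,5,8,9, handed out in
  // order so rank 0 gets {0,1,4} and rank 1 gets {5,8,9} (each rank keeps
  // its original local count). The right half similarly ends with rank 2
  // holding {2,3} and rank 3 holding {6,7}.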

  T numLocalCoords = endIdx - firstIdx;
  gids = new T [numLocalCoords];
  if (!gids)
    throw std::bad_alloc();

  T firstRow = firstIdx / numProcsInMyHalf;
  T firstCol = (firstIdx % numProcsInMyHalf) + firstProc;
  int next = 0;

  for (T row = firstRow; static_cast<T> (next) < numLocalCoords; row++){
    T firstGid = row * nprocs + firstCol;
    for (T col = firstCol; static_cast<T> (next) < numLocalCoords && col < static_cast<T> (endProc); col++){
      gids[next++] = firstGid++;
    }
    firstCol = firstProc;
  }
}

void timeEpetra(TPETRA_GO numGlobalCoords, const RCP<const MpiComm<int> > &comm, bool);
void
timeTpetra (const TPETRA_GO numGlobalCoords,
            const Teuchos::RCP<const Teuchos::Comm<int> >& comm,
            const bool doMemory);
void timeZoltan(ZOLTAN_GO numGlobalCoords, bool);

// Main

int main(int narg, char *arg[])
{
  Tpetra::ScopeGuard tscope(&narg, &arg);
  Teuchos::RCP<const Teuchos::Comm<int> > genComm = Tpetra::getDefaultComm();
  Teuchos::RCP<const MpiComm<int> > comm =
    rcp_dynamic_cast<const MpiComm<int> >(genComm);

  int rank = genComm->getRank();

  if (narg < 2){
    if (rank == 0)
      usage(arg);
    return 1;
  }

  TPETRA_GO numGlobalCoords = 0;
  std::string theArg(arg[1]);
  std::istringstream iss(theArg);
  iss >> numGlobalCoords;
  if (numGlobalCoords < genComm->getSize()){
    if (rank == 0)
      usage(arg);
    return 1;
  }

  if (rank == 0)
    std::cout << numGlobalCoords << " coordinates" << std::endl;

  tmvBuild = TimeMonitor::getNewTimer("CONSEC build Tpetra");
  tmvMigrate = TimeMonitor::getNewTimer("CONSEC migrate Tpetra");
  tmvBuildN = TimeMonitor::getNewTimer("!CONSEC build Tpetra");
  tmvMigrateN = TimeMonitor::getNewTimer("!CONSEC migrate Tpetra");

  ztnBuild = TimeMonitor::getNewTimer("CONSEC build Zoltan1");
  ztnMigrate = TimeMonitor::getNewTimer("CONSEC migrate Zoltan1");
  ztnBuildN = TimeMonitor::getNewTimer("!CONSEC build Zoltan1");
  ztnMigrateN = TimeMonitor::getNewTimer("!CONSEC migrate Zoltan1");

  emvBuild = TimeMonitor::getNewTimer("CONSEC build Epetra");
  emvMigrate = TimeMonitor::getNewTimer("CONSEC migrate Epetra");
  emvBuildN = TimeMonitor::getNewTimer("!CONSEC build Epetra");
  emvMigrateN = TimeMonitor::getNewTimer("!CONSEC migrate Epetra");

  TimeMonitor::zeroOutTimers();

  int ntests = 3;

  // Test with Zoltan_Comm and Zoltan_DataDirectory

  for (int i=0; i < ntests; i++){
    if (rank == 0)
      std::cout << "Zoltan test " << i+1 << std::endl;
    timeZoltan(numGlobalCoords, i==ntests-1);
  }

  // Test with Epetra_MultiVector

  for (int i=0; i < ntests; i++){
    if (rank == 0)
      std::cout << "Epetra test " << i+1 << std::endl;
    timeEpetra(numGlobalCoords, comm, i==ntests-1);
  }

  // Test with Tpetra::MultiVector

  for (int i=0; i < ntests; i++){
    if (rank == 0)
      std::cout << "Tpetra test " << i+1 << std::endl;
    timeTpetra(numGlobalCoords, comm, i==ntests-1);
  }

  // Report

  TimeMonitor::summarize();

  if (comm->getRank() == 0)
    std::cout << "PASS" << std::endl;
}

void
timeTpetra (const TPETRA_GO numGlobalCoords,
            const Teuchos::RCP<const Teuchos::Comm<int> >& comm,
            const bool doMemory)
{
  using Teuchos::arcp;
  using Teuchos::ArrayRCP;
  using Teuchos::ArrayView;
  using Teuchos::Comm;
  using Teuchos::RCP;
  using Teuchos::rcp;
  typedef Tpetra::Map<TPETRA_LO, TPETRA_GO> map_type;
  typedef Tpetra::MultiVector<TPETRA_SCALAR, TPETRA_LO, TPETRA_GO> MV;
  typedef ArrayView<const TPETRA_SCALAR> coordList_t;

  const int nprocs = comm->getSize ();
  const int rank = comm->getRank ();

  // Create a MV with contiguous global IDs

  const TPETRA_LO numLocalCoords = numSequentialGlobalIds(numGlobalCoords, nprocs, rank);

  RCP<const map_type> tmap;
  RCP<MV> mvector;
  TPETRA_SCALAR* coords = NULL;
  {
    Teuchos::TimeMonitor timeMon (*tmvBuild);

    tmap = rcp (new map_type (numGlobalCoords, numLocalCoords, 0, comm));

    coords = new TPETRA_SCALAR [COORDDIM * numLocalCoords];
    memset (coords, 0, sizeof(TPETRA_SCALAR) * numLocalCoords * COORDDIM);

    coordList_t *avList = new coordList_t [COORDDIM];
    TPETRA_LO offset = 0;

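    // coords holds COORDDIM contiguous blocks of length numLocalCoords, one
    // block per coordinate dimension; each ArrayView below aliases one block
    // and provides the data for one column (vector) of the multivector.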
    for (int dim = 0; dim < COORDDIM; ++dim) {
      avList[dim] = coordList_t(coords + offset, numLocalCoords);
      offset += numLocalCoords;
    }

    ArrayRCP<const coordList_t> vectors = arcp (avList, 0, COORDDIM);
    mvector = rcp (new MV (tmap, vectors.view (0, COORDDIM), COORDDIM));
  }

  if (rank == 0 && doMemory) {
    const long nkb = Zoltan2::getProcessKilobytes ();
    std::cout << "Create mvector 1: " << nkb << std::endl;
  }

  // Migrate the MV.

  ArrayRCP<const TPETRA_GO> newGidArray;
  {
    TPETRA_GO *newGids = NULL;
    roundRobinGlobalIds<TPETRA_GO> (numGlobalCoords, nprocs, rank, newGids);
    newGidArray = arcp<const TPETRA_GO> (newGids, 0, numLocalCoords, true);
  }

  RCP<const map_type> newTmap;
  RCP<Tpetra::Import<TPETRA_LO, TPETRA_GO> > importer;
  RCP<MV> newMvector;
  {
    Teuchos::TimeMonitor timeMon (*tmvMigrate);

    newTmap = rcp (new map_type (numGlobalCoords, newGidArray.view(0, numLocalCoords), 0, comm));
    importer = rcp (new Tpetra::Import<TPETRA_LO, TPETRA_GO> (tmap, newTmap));
    newMvector = rcp (new MV (newTmap, COORDDIM, true));

    newMvector->doImport (*mvector, *importer, Tpetra::INSERT);
    mvector = newMvector;
  }

  delete [] coords;

  if (rank == 0 && doMemory) {
    const long nkb = Zoltan2::getProcessKilobytes ();
    std::cout << "Create mvector 2: " << nkb << std::endl;
  }

  // Divide processes into two halves.

  RCP<Comm<int> > subComm;
  {
    int groupSize = 0;
    int leftHalfNumProcs = nprocs / 2;
    int *myHalfProcs = NULL;

    if (rank < leftHalfNumProcs){
      groupSize = leftHalfNumProcs;
      myHalfProcs = new int [groupSize];
      for (int i=0; i < groupSize; i++)
        myHalfProcs[i] = i;
    }
    else {
      groupSize = nprocs - leftHalfNumProcs;
      myHalfProcs = new int [groupSize];
      int firstNum = leftHalfNumProcs;
      for (int i=0; i < groupSize; i++)
        myHalfProcs[i] = firstNum++;
    }

    ArrayView<const int> idView(myHalfProcs, groupSize);
    subComm = comm->createSubcommunicator (idView);
    delete [] myHalfProcs;
  }

  // Divide the multivector into two. Each process group is creating
  // a multivector with non-contiguous global ids. For one group,
  // base gid is not 0.
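  // (Continuing the 10-coordinate, 4-proc example: the left group's
  // sub-multivector holds gids {0,4,8} and {1,5,9} with a global size of 6,
  // while the right group's holds {2,6} and {3,7}, whose smallest gid is 2.)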

  size_t globalSize = Teuchos::OrdinalTraits<size_t>::invalid ();
  RCP<map_type> subMap;
  RCP<MV> subMvector;
  {
    Teuchos::TimeMonitor timeMon (*tmvBuildN);

    ArrayView<const TPETRA_GO> gidList = mvector->getMap ()->getLocalElementList ();
    subMap = rcp (new map_type (globalSize, gidList, 0, subComm));
    globalSize = subMap->getGlobalNumElements ();

    // Get a view of the block of rows to copy.
    RCP<MV> tmp = mvector->offsetViewNonConst (subMap, 0);
    // Create a new multivector to hold the group's rows.
    subMvector = rcp (new MV (subMap, mvector->getNumVectors ()));
    // Copy the view into the new multivector.
    Tpetra::deep_copy (*subMvector, *tmp);
  }

  // Each subgroup migrates the sub-multivector so the
  // global Ids are increasing with process rank.

  TPETRA_GO *increasingGids = NULL;
  subGroupGloballyIncreasingIds<TPETRA_GO> (numGlobalCoords, nprocs,
                                            rank, increasingGids);

  ArrayRCP<const TPETRA_GO> incrGidArray (increasingGids, 0, numLocalCoords, true);

  RCP<const map_type> newSubMap;
  RCP<Tpetra::Import<TPETRA_LO, TPETRA_GO> > subImporter;
  RCP<MV> newSubMvector;
  {
    Teuchos::TimeMonitor timeMon (*tmvMigrateN);

    newSubMap =
      rcp (new map_type (globalSize, incrGidArray.view (0, numLocalCoords),
                         0, subComm));
    subImporter = rcp (new Tpetra::Import<TPETRA_LO, TPETRA_GO> (subMap, newSubMap));
    newSubMvector = rcp (new MV (newSubMap, COORDDIM, true));
    newSubMvector->doImport (*subMvector, *subImporter, Tpetra::INSERT);
    mvector = newSubMvector;
  }
}

void timeEpetra(TPETRA_GO numGlobalCoords, const RCP<const MpiComm<int> > &comm,
  bool doMemory)
{
  RCP<const Teuchos::OpaqueWrapper<MPI_Comm> > commPtr =
    comm->getRawMpiComm();

  RCP<Epetra_MpiComm> ecomm = rcp(new Epetra_MpiComm((*commPtr)()));

  int nprocs = comm->getSize();
  int rank = comm->getRank();

  // Create a MV with contiguous global IDs

  TPETRA_LO numLocalCoords = numSequentialGlobalIds(numGlobalCoords, nprocs, rank);

  emvBuild->start();
  emvBuild->incrementNumCalls();

  RCP<Epetra_BlockMap> emap = rcp(new Epetra_BlockMap(numGlobalCoords,
    numLocalCoords, 1, 0, *ecomm));

  TPETRA_SCALAR *coords = new TPETRA_SCALAR [COORDDIM * numLocalCoords];
  memset(coords, 0, sizeof(TPETRA_SCALAR) * numLocalCoords * COORDDIM);

  RCP<Epetra_MultiVector> mvector = rcp(new Epetra_MultiVector(
    View, *emap, coords, 1, COORDDIM));

  emvBuild->stop();

  if (rank==0 && doMemory){
    long nkb = Zoltan2::getProcessKilobytes();
    std::cout << "Create mvector 1: " << nkb << std::endl;
  }

  // Migrate the MV.

  TPETRA_GO *newGids = NULL;
  roundRobinGlobalIds<TPETRA_GO>(numGlobalCoords, nprocs, rank, newGids);

  emvMigrate->start();
  emvMigrate->incrementNumCalls();

  RCP<Epetra_BlockMap> newMap = rcp(new Epetra_BlockMap(numGlobalCoords,
    numLocalCoords, newGids, 1, 0, *ecomm));

  RCP<Epetra_Import> importer = rcp(new Epetra_Import(*newMap, *emap));

  RCP<Epetra_MultiVector> newMvector = rcp(new Epetra_MultiVector(
    *newMap, COORDDIM));

  newMvector->Import(*mvector, *importer, Insert);

  mvector = newMvector;

  emvMigrate->stop();

  delete [] coords;

  if (rank==0 && doMemory){
    long nkb = Zoltan2::getProcessKilobytes();
    std::cout << "Create mvector 2: " << nkb << std::endl;
  }

  // Divide processes into two halves.

  int groupSize = 0;
  int leftHalfNumProcs = nprocs / 2;
  int *myHalfProcs = NULL;

  if (rank < leftHalfNumProcs){
    groupSize = leftHalfNumProcs;
    myHalfProcs = new int [groupSize];
    for (int i=0; i < groupSize; i++)
      myHalfProcs[i] = i;
  }
  else {
    groupSize = nprocs - leftHalfNumProcs;
    myHalfProcs = new int [groupSize];
    int firstNum = leftHalfNumProcs;
    for (int i=0; i < groupSize; i++)
      myHalfProcs[i] = firstNum++;
  }

  ArrayView<const int> idView(myHalfProcs, groupSize);
  // TODO - memory leak in createSubcommunicator.
  RCP<Comm<int> > newComm = comm->createSubcommunicator(idView);
  RCP<MpiComm<int> > genSubComm = rcp_dynamic_cast<MpiComm<int> >(newComm);

  commPtr = genSubComm->getRawMpiComm();

  RCP<Epetra_MpiComm> subComm = rcp(new Epetra_MpiComm((*commPtr)()));

  delete [] myHalfProcs;

  // Divide the multivector into two. Each process group is creating
  // a multivector with non-contiguous global ids. For one group,
  // base gid is not 0.
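  // (As in the Tpetra version above: with 4 procs the left group keeps gids
  // {0,4,8} and {1,5,9}, the right group {2,6} and {3,7}, so the right
  // group's smallest gid is not 0.)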

  emvBuildN->start();
  emvBuildN->incrementNumCalls();

  RCP<Epetra_BlockMap> subMap = rcp(new Epetra_BlockMap(-1,
    numLocalCoords, newGids, 1, 0, *subComm));

  TPETRA_SCALAR **avSubList = new TPETRA_SCALAR * [COORDDIM];

  for (int dim=0; dim < COORDDIM; dim++)
    (*mvector)(dim)->ExtractView(avSubList + dim);

  RCP<Epetra_MultiVector> subMvector = rcp(new Epetra_MultiVector(
    View, *subMap, avSubList, COORDDIM));

  mvector = subMvector;

  delete [] avSubList;
  delete [] newGids;

  emvBuildN->stop();

  // Each subgroup migrates the sub-multivector so the
  // global Ids are increasing with process rank.

  TPETRA_GO *increasingGids = NULL;
  subGroupGloballyIncreasingIds<TPETRA_GO>(numGlobalCoords, nprocs, rank,
    increasingGids);

  emvMigrateN->start();
  emvMigrateN->incrementNumCalls();

  RCP<Epetra_BlockMap> newSubMap = rcp(new Epetra_BlockMap(-1,
    numLocalCoords, increasingGids, 1, 0, *subComm));

  RCP<Epetra_Import> subImporter = rcp(new Epetra_Import(
    *subMap, *newSubMap));

  RCP<Epetra_MultiVector> newSubMvector = rcp(new Epetra_MultiVector(
    *newSubMap, COORDDIM));

  newSubMvector->Import(*subMvector, *subImporter, Insert);

  mvector = newSubMvector;

  emvMigrateN->stop();

  delete [] increasingGids;
}

void timeZoltan(ZOLTAN_GO numGlobalCoords,
  bool doMemory)
{
  int nprocs, rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  // Create a global data directory with contiguous global IDs.
  // (We don't need this, but it is analogous to a Tpetra::Map.)
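  // (A Zoltan distributed directory plays the role of the map here:
  // Zoltan_DD_Update registers the global ids this process owns, and
  // Zoltan_DD_Find later reports which process owns any queried id.)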

  ZOLTAN_LO numLocalCoords = numSequentialGlobalIds(numGlobalCoords, nprocs, rank);

  ZOLTAN_GO offset=0;
  MPI_Exscan(&numLocalCoords, &offset, 1,
    MPI_ZOLTAN_GO_TYPE, MPI_SUM, MPI_COMM_WORLD);

  ZOLTAN_GO *gids = new ZOLTAN_GO [numLocalCoords];
  for (ZOLTAN_LO i=0; i < numLocalCoords; i++){
    gids[i] = offset++;
  }

  ztnBuild->start();
  ztnBuild->incrementNumCalls();

  struct Zoltan_DD_Struct *dd = NULL;
  int rc = Zoltan_DD_Create(&dd, MPI_COMM_WORLD, 1, 1, 0, numLocalCoords, 0);
  if (rc != ZOLTAN_OK)
    exit(1);

  rc = Zoltan_DD_Update(dd, gids, NULL, NULL, NULL, numLocalCoords);

  // Create an array of coordinates associated with the global Ids.

  TPETRA_SCALAR *coords = new TPETRA_SCALAR [COORDDIM * numLocalCoords];
  memset(coords, 0, sizeof(TPETRA_SCALAR) * numLocalCoords * COORDDIM);

  ztnBuild->stop();

  if (rank==0 && doMemory){
    long nkb = Zoltan2::getProcessKilobytes();
    std::cout << "Create mvector 1: " << nkb << std::endl;
  }

  Zoltan_DD_Destroy(&dd);

  // Migrate the array of coordinates.

  ZOLTAN_GO *newGids = NULL;
  roundRobinGlobalIds<ZOLTAN_GO>(numGlobalCoords, nprocs, rank, newGids);

  ztnMigrate->start();
  ztnMigrate->incrementNumCalls();

  struct Zoltan_DD_Struct *ddNew = NULL; // new "map"
  rc = Zoltan_DD_Create(&ddNew, MPI_COMM_WORLD, 1, 1, 0, numLocalCoords, 0);
  if (rc != ZOLTAN_OK)
    exit(1);

  rc = Zoltan_DD_Update(ddNew, newGids, NULL, NULL, NULL, numLocalCoords);
  if (rc != ZOLTAN_OK)
    exit(1);

  int *procOwners = new int [numLocalCoords]; // procs to get my data
  rc = Zoltan_DD_Find(ddNew, gids, NULL, NULL, NULL,
    numLocalCoords, procOwners);
  if (rc != ZOLTAN_OK)
    exit(1);

  Zoltan_DD_Destroy(&ddNew);

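  // Build an unstructured communication plan from the list of destination
  // processes: procOwners[i] is where local coordinate i must go, and
  // Zoltan_Comm_Create reports how many items this process will receive.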
  struct Zoltan_Comm_Obj *commPlan = NULL; // global communication plan
  int tag = 10000;
  int numReceive = 0;

  rc = Zoltan_Comm_Create(&commPlan, numLocalCoords, procOwners, MPI_COMM_WORLD,
    tag, &numReceive);
  if (rc != ZOLTAN_OK)
    exit(1);

  TPETRA_SCALAR *newCoords = new TPETRA_SCALAR [COORDDIM * numReceive];

  tag = 11000;

  // To prevent compile warnings or errors
  void *x = static_cast<void *>(coords);
  char *charCoords = static_cast<char *>(x);
  x = static_cast<void *>(newCoords);
  char *charNewCoords = static_cast<char *>(x);

  rc = Zoltan_Comm_Do(commPlan, tag, charCoords,
    sizeof(TPETRA_SCALAR)*COORDDIM, charNewCoords);

  if (rc != ZOLTAN_OK)
    exit(1);

  ztnMigrate->stop();

  Zoltan_Comm_Destroy(&commPlan);
  delete [] coords;
  delete [] gids;

  if (rank==0 && doMemory){
    long nkb = Zoltan2::getProcessKilobytes();
    std::cout << "Create mvector 2: " << nkb << std::endl;
  }

  // Divide processes into two halves.

  int groupSize = 0;
  int leftHalfNumProcs = nprocs / 2;
  int *myHalfProcs = NULL;

  if (rank < leftHalfNumProcs){
    groupSize = leftHalfNumProcs;
    myHalfProcs = new int [groupSize];
    for (int i=0; i < groupSize; i++)
      myHalfProcs[i] = i;
  }
  else {
    groupSize = nprocs - leftHalfNumProcs;
    myHalfProcs = new int [groupSize];
    for (int i=0; i < groupSize; i++)
      myHalfProcs[i] = i + leftHalfNumProcs;
  }

  MPI_Group group, subGroup;
  MPI_Comm subComm;

  MPI_Comm_group(MPI_COMM_WORLD, &group);
  MPI_Group_incl(group, groupSize, myHalfProcs, &subGroup);
  MPI_Comm_create(MPI_COMM_WORLD, subGroup, &subComm);
  MPI_Group_free(&subGroup);

  delete [] myHalfProcs;

  // Create global data directories for our sub groups.
  // (Analogous to creating the new MultiVectors in Tpetra.)

  ztnBuildN->start();
  ztnBuildN->incrementNumCalls();

  struct Zoltan_DD_Struct *ddSub = NULL; // subgroup "map"
  rc = Zoltan_DD_Create(&ddSub, subComm, 1, 1, 0, numLocalCoords, 0);
  if (rc != ZOLTAN_OK)
    exit(1);

  rc = Zoltan_DD_Update(ddSub, newGids, NULL, NULL, NULL, numLocalCoords);
  if (rc != ZOLTAN_OK)
    exit(1);

  ztnBuildN->stop();

  Zoltan_DD_Destroy(&ddSub);

  // Each subgroup migrates the sub-arrays so the
  // global Ids are again increasing with process rank.

  ZOLTAN_GO *increasingGids = NULL;
  subGroupGloballyIncreasingIds<ZOLTAN_GO>(
    numGlobalCoords, nprocs, rank, increasingGids);

  // Subgroup "map" corresponding to the new, globally increasing ids.

  ztnMigrateN->start();
  ztnMigrateN->incrementNumCalls();

  struct Zoltan_DD_Struct *ddNewSub = NULL;
  rc = Zoltan_DD_Create(&ddNewSub, subComm, 1, 1, 0, numLocalCoords, 0);
  if (rc != ZOLTAN_OK)
    exit(1);

  rc = Zoltan_DD_Update(ddNewSub, increasingGids, NULL, NULL, NULL,
    numLocalCoords);

  // Which process gets my current coordinates in the new map?

  rc = Zoltan_DD_Find(ddNewSub, newGids, NULL, NULL, NULL, numLocalCoords, procOwners);
  if (rc != ZOLTAN_OK)
    exit(1);

  delete [] newGids;

  Zoltan_DD_Destroy(&ddNewSub);

  struct Zoltan_Comm_Obj *subCommPlan = NULL; // subgroup communication plan
  tag = 12000;

  rc = Zoltan_Comm_Create(&subCommPlan, numLocalCoords, procOwners, subComm,
    tag, &numReceive);
  if (rc != ZOLTAN_OK)
    exit(1);

  delete [] procOwners;

  TPETRA_SCALAR *newContigCoords = new TPETRA_SCALAR [COORDDIM * numReceive];

  tag = 13000;
  // To prevent compile warnings or errors
  x = static_cast<void *>(newContigCoords);
  char *charNewContigCoords = static_cast<char *>(x);

  rc = Zoltan_Comm_Do(subCommPlan, tag, charNewCoords,
    sizeof(TPETRA_SCALAR)*COORDDIM, charNewContigCoords);
  if (rc != ZOLTAN_OK)
    exit(1);

  ztnMigrateN->stop();

  delete [] newCoords;
  delete [] newContigCoords;
  delete [] increasingGids;
  Zoltan_Comm_Destroy(&subCommPlan);
}
#else
int main(int narg, char *arg[])
{
  Tpetra::ScopeGuard tscope(&narg, &arg);
  Teuchos::RCP<const Teuchos::Comm<int> > genComm = Tpetra::getDefaultComm();

  if (genComm->getRank() == 0){
    std::cout << "Test not run because MPI is not available." << std::endl;
    std::cout << "PASS" << std::endl;
  }
  return 0;
}
#endif // HAVE_MPI