Zoltan2
Loading...
Searching...
No Matches
Zoltan2_Directory_Comm.hpp
Go to the documentation of this file.
1// @HEADER
2// *****************************************************************************
3// Zoltan2: A package of combinatorial algorithms for scientific computing
4//
5// Copyright 2012 NTESS and the Zoltan2 contributors.
6// SPDX-License-Identifier: BSD-3-Clause
7// *****************************************************************************
8// @HEADER
9
10#ifndef ZOLTAN2_DIRECTORY_COMM_H_
11#define ZOLTAN2_DIRECTORY_COMM_H_
12
13#include <Teuchos_CommHelpers.hpp>
14#include <vector>
15#include <mpi.h>
16#include <Teuchos_ArrayRCP.hpp>
17
18namespace Zoltan2 {
19
20class Zoltan2_Directory_Plan { /* data for mapping between decompositions */
21 public:
24 // NOTE(review): declaration stripped by the doc scraper (was a hyperlink in Doxygen) — presumably the constructor; confirm against the real header
27 // NOTE(review): stripped declaration — likely the destructor; confirm against the real header
29 // NOTE(review): stripped declaration — tooltip text at the end of this scrape suggests `void getInvertedValues(Zoltan2_Directory_Plan *from)`; confirm
30 void print(const std::string& headerMessage) const; /* debug dump of the plan's state, labeled with headerMessage */
31
32 Teuchos::ArrayRCP<int> procs_to; /* processors I'll send to */
33 Teuchos::ArrayRCP<int> procs_from; /* processors I'll receive from*/
34 Teuchos::ArrayRCP<int> lengths_to; /* # items I send in my messages */
35 Teuchos::ArrayRCP<int> lengths_from; /* # items I recv in my messages */
36
37 /* Following arrays used if send/recv data is packed contiguously */
38 Teuchos::ArrayRCP<int> starts_to; /* where in item lists each send starts */
39 Teuchos::ArrayRCP<int> starts_from; /* where in item lists each recv starts */
40
41 /* Following arrays used if send/recv data not packed contiguously */
42 Teuchos::ArrayRCP<int> indices_to; /* indices of items I send in my msgs */
43
44 /* ordered consistent with lengths_to */
45 Teuchos::ArrayRCP<int> indices_from; /* indices for where to put arriving data */
46
47 /* ordered consistent with lengths_from */
48
49 /* Above information is sufficient if items are all of the same size */
50 /* If item sizes are variable, then need following additional arrays */
51 Teuchos::ArrayRCP<int> sizes; /* size of each item to send (if items vary) */
52 bool using_sizes; /* may refactor this so it's out - tracks whether we are in size mode even if 0 size */
53
54 Teuchos::ArrayRCP<int> sizes_to; /* size of each msg to send (if items vary) */
55 Teuchos::ArrayRCP<int> sizes_from; /* size of each msg to recv (if items vary) */
56
57 /* Following used if send/recv data is packed contiguously & items vary */
58 Teuchos::ArrayRCP<int> starts_to_ptr; /* where in dense array sends starts */
59 Teuchos::ArrayRCP<int> starts_from_ptr; /* where in dense each recv starts */
60
61 /* Following used if send/recv data not packed contiguously & items vary */
62 Teuchos::ArrayRCP<int> indices_to_ptr; /* where to find items I send in my msgs */
63 /* ordered consistent with lengths_to */
64 Teuchos::ArrayRCP<int> indices_from_ptr; /* where to find items I recv */
65 /* ordered consistent with lengths_from */
66
67 /* Note: ALL above arrays include data for self-msg */
68
69 int nvals; /* number of values I own to start */
70 int nvals_recv; /* number of values I own after remapping */
71 int nrecvs; /* number of msgs I'll recv (w/o self_msg) */
72 int nsends; /* number of msgs I'll send (w/o self_msg) */
73 int self_msg; /* do I have data for myself? */
74 int max_send_size; /* size of longest message I send (w/o self) */
75 int total_recv_size; /* total amount of data I'll recv (w/ self) */
76 int maxed_recvs; /* use MPI_Alltoallv if too many receives */
77 Teuchos::RCP<const Teuchos::Comm<int> > comm; /* communicator */
78
79 Teuchos::ArrayRCP<Teuchos::RCP<Teuchos::CommRequest<int> > > request; /* MPI requests for posted recvs */
80
81 Zoltan2_Directory_Plan* plan_reverse; /* to support POST & WAIT */
82
83 Teuchos::ArrayRCP<char> recv_buff; /* To support POST & WAIT */
84 Teuchos::ArrayRCP<char> getRecvBuff() const { return recv_buff; } /* shallow copy of the receive buffer handle (ArrayRCP is reference-counted) */
85};
86
88 public: // NOTE(review): the class-declaration line (Doxygen line 87, presumably "class Zoltan2_Directory_Comm {") was a hyperlink and is missing from this scrape — confirm against the real header
90 int nvals, /* number of values I currently own */
91 const Teuchos::ArrayRCP<int> &assign, /* processor assignment for all values */
92 Teuchos::RCP<const Teuchos::Comm<int> > comm, /* communicator */
93 int tag); /* message tag I can use */
94 // NOTE(review): the lines above are constructor parameters; the constructor-name line (Doxygen line 89) is missing from this scrape
96 // NOTE(review): stripped declaration (Doxygen line 95) — likely the destructor; confirm
97 int do_forward( /* forward communication: push owned data out per the forward plan */
98 int tag, /* message tag for communicating */
99 const Teuchos::ArrayRCP<char> &send_data, /* array of data I currently own */
100 int nbytes, /* msg size */
101 Teuchos::ArrayRCP<char> &recv_data); /* array of data to receive */
102
103 int do_reverse( /* reverse communication: send data back along the inverted plan */
104 int tag, /* message tag for communicating */
105 const Teuchos::ArrayRCP<char> &send_data, /* array of data I currently own */
106 int nbytes, /* msg size */
107 const Teuchos::ArrayRCP<int> &sizes, /* per-item sizes when items vary */
108 Teuchos::ArrayRCP<char> &recv_data); /* array of data owned after reverse */
109
110 int getNRec() const { return nrec; } /* accessor for nrec */
111
113 return plan_forward->total_recv_size; // NOTE(review): the signature line for this accessor (Doxygen line 112) is missing; tooltip text elsewhere suggests it is a total-recv-size getter — confirm
114 }
115
116 int resize(const Teuchos::ArrayRCP<int> &sizes, int tag, /* switch the plan to variable-sized items; reports summed recv size */
117 int *sum_recv_sizes);
118
119 private:
121 const Teuchos::ArrayRCP<int> &sizes, int tag, int *sum_recv_sizes); // NOTE(review): parameter tail of a private overload whose name line (Doxygen line 120) is missing — presumably a resize taking an explicit plan; confirm
122
123 int do_post(Zoltan2_Directory_Plan *plan, int tag, /* post receives / start sends for a plan */
124 const Teuchos::ArrayRCP<char> &send_data,
125 int nbytes, /* msg size */
126 Teuchos::ArrayRCP<char> &recv_data);
127
128 int do_wait(Zoltan2_Directory_Plan *plan, int tag, /* complete the communication started by do_post */
129 const Teuchos::ArrayRCP<char> &send_data,
130 int nbytes, /* msg size */
131 Teuchos::ArrayRCP<char> &recv_data);
132
133 int do_all_to_all(Zoltan2_Directory_Plan *plan, /* alternative path when posted receives would exceed the limit */
134 const Teuchos::ArrayRCP<char> &send_data,
135 int nbytes, /* msg size */
136 Teuchos::ArrayRCP<char> &recv_data);
137
138 int sort_ints(Teuchos::ArrayRCP<int> &vals_sort, Teuchos::ArrayRCP<int> &vals_other); /* sort vals_sort, applying the same permutation to vals_other */
139
140 int invert_map(const Teuchos::ArrayRCP<int> &lengths_to, /* derive the recv side (lengths_from/procs_from/nrecvs) from the send side */
141 const Teuchos::ArrayRCP<int> &procs_to, int nsends, int self_msg,
142 Teuchos::ArrayRCP<int> &lengths_from, Teuchos::ArrayRCP<int> &procs_from,
143 int *pnrecvs, int my_proc,int nprocs, int out_of_mem, int tag,
144 Teuchos::RCP<const Teuchos::Comm<int> > comm);
145
146 int exchange_sizes(const Teuchos::ArrayRCP<int> &sizes_to, /* trade per-message sizes with partners; accumulates total_recv_size */
147 const Teuchos::ArrayRCP<int> &procs_to, int nsends,
148 int self_msg, Teuchos::ArrayRCP<int> &sizes_from,
149 const Teuchos::ArrayRCP<int> &procs_from,
150 int nrecvs, int *total_recv_size, int my_proc, int tag,
151 Teuchos::RCP<const Teuchos::Comm<int> > comm);
152
153 void free_reverse_plan(Zoltan2_Directory_Plan *plan); /* release plan->plan_reverse */
154
155 int create_reverse_plan(int tag, const Teuchos::ArrayRCP<int> &sizes); /* build the inverted plan used by do_reverse */
156
157 Teuchos::RCP<const Teuchos::Comm<int> > comm_;
158 Zoltan2_Directory_Plan * plan_forward; // for efficient MPI communication
159 int nrec; // number of received values; exposed via getNRec()
160};
161
162// -----------------------------------------------------------------------------
163// TODO: Decide how to handle this code - copied from zoltan - some may be relics
164 /* Red Storm MPI permits a maximum of 2048 receives. We set our
165 * limit of posted receives to 2000, leaving some for the application.
166 */
167 #ifndef MPI_RECV_LIMIT
168 /* Decided for Trilinos v10/Zoltan v3.2 would almost always use */
169 /* MPI_Alltoall communication instead of point-to-point. */
170 /* August 2009 */
171 /* #define MPI_RECV_LIMIT 4 */
172
173 /* Decided for zoltan_gid_64 branch to always use posted receives because
174 * Alltoall requires that offsets be 32-bit integers. October 2010
175 */
176 #define MPI_RECV_LIMIT 0
177 /* #define MPI_RECV_LIMIT 2000 */
178 #endif
179// -----------------------------------------------------------------------------
180
181} // end namespace Zoltan2
182
183#endif
int do_reverse(int tag, const Teuchos::ArrayRCP< char > &send_data, int nbytes, const Teuchos::ArrayRCP< int > &sizes, Teuchos::ArrayRCP< char > &recv_data)
int do_forward(int tag, const Teuchos::ArrayRCP< char > &send_data, int nbytes, Teuchos::ArrayRCP< char > &recv_data)
int resize(const Teuchos::ArrayRCP< int > &sizes, int tag, int *sum_recv_sizes)
void getInvertedValues(Zoltan2_Directory_Plan *from)
void print(const std::string &headerMessage) const
Teuchos::ArrayRCP< char > getRecvBuff() const
Teuchos::ArrayRCP< Teuchos::RCP< Teuchos::CommRequest< int > > > request
Teuchos::RCP< const Teuchos::Comm< int > > comm
Teuchos::ArrayRCP< int > indices_from_ptr
Created by mbenlioglu on Aug 31, 2020.