Zoltan2
Loading...
Searching...
No Matches
Zoltan2_Directory_Comm.hpp
Go to the documentation of this file.
1/*
2 * @HEADER
3 *
4 * ***********************************************************************
5 *
6 * Zoltan2 Directory for Load-balancing, Partitioning, Ordering and Coloring
7 * Copyright 2012 Sandia Corporation
8 *
9 * Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
10 * the U.S. Government retains certain rights in this software.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions are
14 * met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 *
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 *
23 * 3. Neither the name of the Corporation nor the names of the
24 * contributors may be used to endorse or promote products derived from
25 * this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
28 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
31 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
33 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
34 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
35 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
36 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 *
39 * Questions? Contact Karen Devine kddevin@sandia.gov
40 * Erik Boman egboman@sandia.gov
41 *
42 * ***********************************************************************
43 *
44 * @HEADER
45 */
46
47#ifndef ZOLTAN2_DIRECTORY_COMM_H_
48#define ZOLTAN2_DIRECTORY_COMM_H_
49
50#include <Teuchos_CommHelpers.hpp>
51#include <vector>
52#include <mpi.h>
53#include <Teuchos_ArrayRCP.hpp>
54
55namespace Zoltan2 {
56
/* NOTE(review): this listing is a doc-generator dump; the number fused to the
 * start of each line is the original header's line number. Original lines
 * 59-66 (constructor/destructor declarations, per the usual shape of such a
 * class) were rendered as hyperlinks and survive only as bare numbers below —
 * confirm against the real Zoltan2_Directory_Comm.hpp in the repository. */
57class Zoltan2_Directory_Plan { /* data for mapping between decompositions */
58 public:
61
64
66
67 void print(const std::string& headerMessage) const;
68
69 Teuchos::ArrayRCP<int> procs_to; /* processors I'll send to */
70 Teuchos::ArrayRCP<int> procs_from; /* processors I'll receive from*/
71 Teuchos::ArrayRCP<int> lengths_to; /* # items I send in my messages */
72 Teuchos::ArrayRCP<int> lengths_from; /* # items I recv in my messages */
73
74 /* Following arrays used if send/recv data is packed contiguously */
75 Teuchos::ArrayRCP<int> starts_to; /* where in item lists each send starts */
76 Teuchos::ArrayRCP<int> starts_from; /* where in item lists each recv starts */
77
78 /* Following arrays used if send/recv data not packed contiguously */
79 Teuchos::ArrayRCP<int> indices_to; /* indices of items I send in my msgs */
80
81 /* ordered consistent with lengths_to */
82 Teuchos::ArrayRCP<int> indices_from; /* indices for where to put arriving data */
83
84 /* ordered consistent with lengths_from */
85
86 /* Above information is sufficient if items are all of the same size */
87 /* If item sizes are variable, then need following additional arrays */
88 Teuchos::ArrayRCP<int> sizes; /* size of each item to send (if items vary) */
89 bool using_sizes; /* may refactor this so it's out - tracks whether we are in size mode even if 0 size */
90
91 Teuchos::ArrayRCP<int> sizes_to; /* size of each msg to send (if items vary) */
92 Teuchos::ArrayRCP<int> sizes_from; /* size of each msg to recv (if items vary) */
93
94 /* Following used if send/recv data is packed contiguously & items vary */
95 Teuchos::ArrayRCP<int> starts_to_ptr; /* where in dense array sends starts */
96 Teuchos::ArrayRCP<int> starts_from_ptr; /* where in dense each recv starts */
97
98 /* Following used if send/recv data not packed contiguously & items vary */
99 Teuchos::ArrayRCP<int> indices_to_ptr; /* where to find items I send in my msgs */
100 /* ordered consistent with lengths_to */
101 Teuchos::ArrayRCP<int> indices_from_ptr; /* where to find items I recv */
102 /* ordered consistent with lengths_from */
103
104 /* Note: ALL above arrays include data for self-msg */
105
106 int nvals; /* number of values I own to start */
107 int nvals_recv; /* number of values I own after remapping */
108 int nrecvs; /* number of msgs I'll recv (w/o self_msg) */
109 int nsends; /* number of msgs I'll send (w/o self_msg) */
110 int self_msg; /* do I have data for myself? */
111 int max_send_size; /* size of longest message I send (w/o self) */
112 int total_recv_size; /* total amount of data I'll recv (w/ self) */
113 int maxed_recvs; /* use MPI_Alltoallv if too many receives */
114 Teuchos::RCP<const Teuchos::Comm<int> > comm; /* communicator */
115
116 Teuchos::ArrayRCP<Teuchos::RCP<Teuchos::CommRequest<int> > > request; /* MPI requests for posted recvs */
117
118 Zoltan2_Directory_Plan* plan_reverse; /* to support POST & WAIT */
119
120 Teuchos::ArrayRCP<char> recv_buff; /* To support POST & WAIT */
121 Teuchos::ArrayRCP<char> getRecvBuff() const { return recv_buff; }
122};
123
/* NOTE(review): the class declaration itself (original line 124, presumably
 * "class Zoltan2_Directory_Comm {") and every member name that the doc
 * generator rendered as a hyperlink are missing from this listing; bare
 * numbers (e.g. 133 below) mark where they were. The tooltip dump at the end
 * of this file shows the constructor as
 *   Zoltan2_Directory_Comm(int nvals, const Teuchos::ArrayRCP<int>& assign,
 *                          Teuchos::RCP<const Teuchos::Comm<int> > comm, int tag)
 * — confirm every missing declaration against the repository header. */
125 public:
 /* Constructor (name on missing original line 126): presumably builds the
  * forward communication plan from each value's destination processor —
  * TODO confirm against the repository header. */
127 int nvals, /* number of values I currently own */
128 const Teuchos::ArrayRCP<int> &assign, /* processor assignment for all values */
129 Teuchos::RCP<const Teuchos::Comm<int> > comm, /* communicator */
130 int tag); /* message tag I can use */
131
133
 /* Forward exchange: ship send_data (nbytes per item) per the stored forward
  * plan; arriving items are placed in recv_data. */
134 int do_forward(
135 int tag, /* message tag for communicating */
136 const Teuchos::ArrayRCP<char> &send_data, /* array of data I currently own */
137 int nbytes, /* msg size */
138 Teuchos::ArrayRCP<char> &recv_data); /* array of data to receive */
139
 /* Reverse exchange: run the plan in the opposite direction (see
  * create_reverse_plan below); sizes supports variable-sized items. */
140 int do_reverse(
141 int tag, /* message tag for communicating */
142 const Teuchos::ArrayRCP<char> &send_data, /* array of data I currently own */
143 int nbytes, /* msg size */
144 const Teuchos::ArrayRCP<int> &sizes,
145 Teuchos::ArrayRCP<char> &recv_data); /* array of data owned after reverse */
146
147 int getNRec() const { return nrec; } /* accessor for nrec */
148
 /* Accessor (declaration on missing original line 149) returning the forward
  * plan's total_recv_size; its name is not recoverable from this listing. */
150 return plan_forward->total_recv_size;
151 }
152
 /* Recompute message sizes for variable-sized items; the summed receive size
  * is returned through sum_recv_sizes. */
153 int resize(const Teuchos::ArrayRCP<int> &sizes, int tag,
154 int *sum_recv_sizes);
155
156 private:
 /* Private helper (declaration on missing original line 157) taking the same
  * sizes/tag/sum_recv_sizes arguments — presumably a per-plan resize
  * overload; TODO confirm against the repository header. */
158 const Teuchos::ArrayRCP<int> &sizes, int tag, int *sum_recv_sizes);
159
 /* POST half of a split send/receive for the given plan. */
160 int do_post(Zoltan2_Directory_Plan *plan, int tag,
161 const Teuchos::ArrayRCP<char> &send_data,
162 int nbytes, /* msg size */
163 Teuchos::ArrayRCP<char> &recv_data);
164
 /* WAIT half: completes the exchange started by do_post. */
165 int do_wait(Zoltan2_Directory_Plan *plan, int tag,
166 const Teuchos::ArrayRCP<char> &send_data,
167 int nbytes, /* msg size */
168 Teuchos::ArrayRCP<char> &recv_data);
169
 /* Alternative exchange path used when posted receives would exceed the
  * platform limit (see plan->maxed_recvs and MPI_RECV_LIMIT below). */
170 int do_all_to_all(Zoltan2_Directory_Plan *plan,
171 const Teuchos::ArrayRCP<char> &send_data,
172 int nbytes, /* msg size */
173 Teuchos::ArrayRCP<char> &recv_data);
174
 /* Sort vals_sort, permuting vals_other identically. */
175 int sort_ints(Teuchos::ArrayRCP<int> &vals_sort, Teuchos::ArrayRCP<int> &vals_other);
176
 /* Derive the receive side (lengths_from/procs_from/nrecvs) from the send
  * side of a plan. */
177 int invert_map(const Teuchos::ArrayRCP<int> &lengths_to,
178 const Teuchos::ArrayRCP<int> &procs_to, int nsends, int self_msg,
179 Teuchos::ArrayRCP<int> &lengths_from, Teuchos::ArrayRCP<int> &procs_from,
180 int *pnrecvs, int my_proc,int nprocs, int out_of_mem, int tag,
181 Teuchos::RCP<const Teuchos::Comm<int> > comm);
182
 /* Exchange per-message sizes with peers; accumulates *total_recv_size. */
183 int exchange_sizes(const Teuchos::ArrayRCP<int> &sizes_to,
184 const Teuchos::ArrayRCP<int> &procs_to, int nsends,
185 int self_msg, Teuchos::ArrayRCP<int> &sizes_from,
186 const Teuchos::ArrayRCP<int> &procs_from,
187 int nrecvs, int *total_recv_size, int my_proc, int tag,
188 Teuchos::RCP<const Teuchos::Comm<int> > comm);
189
190 void free_reverse_plan(Zoltan2_Directory_Plan *plan);
191
192 int create_reverse_plan(int tag, const Teuchos::ArrayRCP<int> &sizes);
193
194 Teuchos::RCP<const Teuchos::Comm<int> > comm_;
195 Zoltan2_Directory_Plan * plan_forward; // for efficient MPI communication
196 int nrec;
197};
198
199// -----------------------------------------------------------------------------
200// TODO: Decide how to handle this code — copied from Zoltan; some of it may be a relic
201 /* Red Storm MPI permits a maximum of 2048 receives. We set our
202 * limit of posted receives to 2000, leaving some for the application.
203 */
204 #ifndef MPI_RECV_LIMIT
205 /* Decided for Trilinos v10/Zoltan v3.2 would almost always use */
206 /* MPI_Alltoall communication instead of point-to-point. */
207 /* August 2009 */
208 /* #define MPI_RECV_LIMIT 4 */
209
210 /* Decided for zoltan_gid_64 branch to always used posted receives because
211 * Alltoall requires that offsets be 32-bit integers. October 2010
212 */
213 #define MPI_RECV_LIMIT 0
214 /* #define MPI_RECV_LIMIT 2000 */
215 #endif
216// -----------------------------------------------------------------------------
217
218} // end namespace Zoltan2
219
220#endif
int do_reverse(int tag, const Teuchos::ArrayRCP< char > &send_data, int nbytes, const Teuchos::ArrayRCP< int > &sizes, Teuchos::ArrayRCP< char > &recv_data)
Zoltan2_Directory_Comm(int nvals, const Teuchos::ArrayRCP< int > &assign, Teuchos::RCP< const Teuchos::Comm< int > > comm, int tag)
int do_forward(int tag, const Teuchos::ArrayRCP< char > &send_data, int nbytes, Teuchos::ArrayRCP< char > &recv_data)
int resize(const Teuchos::ArrayRCP< int > &sizes, int tag, int *sum_recv_sizes)
void getInvertedValues(Zoltan2_Directory_Plan *from)
void print(const std::string &headerMessage) const
Teuchos::ArrayRCP< char > getRecvBuff() const
Teuchos::ArrayRCP< Teuchos::RCP< Teuchos::CommRequest< int > > > request
Teuchos::RCP< const Teuchos::Comm< int > > comm
Created by mbenlioglu on Aug 31, 2020.