dune-grid-glue  2.6-git
ringcomm.hh
// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
/* IMPLEMENTATION OF CLASS G R I D G L U E */

#define CheckMPIStatus(A,B) {}

#include <mpi.h>

#include <array>
#include <cassert>
#include <functional>
#include <iostream>
#include <tuple>
#include <vector>

#include <dune/common/fvector.hh>
#include <dune/common/shared_ptr.hh>
#include <dune/common/hybridutilities.hh>
#include <dune/common/parallel/mpitraits.hh>  // Dune::MPITraits, used below
#include <dune/common/std/utility.hh>
#include <dune/common/std/apply.hh>
#include <dune/common/unused.hh>              // DUNE_UNUSED

#include <dune/geometry/type.hh>
namespace Dune {
namespace Parallel {

  namespace Impl {

    template<typename T>
    struct MPITypeInfo {};

    template<>
    struct MPITypeInfo< int >
    {
      static const unsigned int size = 1;
      static inline MPI_Datatype getType()
      {
        return MPI_INT;
      }
    };

    template<typename K, int N>
    struct MPITypeInfo< Dune::FieldVector<K,N> >
    {
      static const unsigned int size = N;
      static inline MPI_Datatype getType()
      {
        return Dune::MPITraits<K>::getType();
      }
    };

    template<>
    struct MPITypeInfo< unsigned int >
    {
      static const unsigned int size = 1;
      static inline MPI_Datatype getType()
      {
        return MPI_UNSIGNED;
      }
    };

    template<>
    struct MPITypeInfo< Dune::GeometryType >
    {
      static const unsigned int size = 1;
      static inline MPI_Datatype getType()
      {
        return Dune::MPITraits< Dune::GeometryType >::getType();
      }
    };
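
    /* Illustration only (not part of the original header): support for
       further element types is added through another MPITypeInfo
       specialization, e.g. a hypothetical one for double:

         template<>
         struct MPITypeInfo< double >
         {
           static const unsigned int size = 1;
           static inline MPI_Datatype getType()
           {
             return MPI_DOUBLE;
           }
         };
    */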

    template<typename T>
    void MPI_SetVectorSize(
      std::vector<T> & data,
      MPI_Status & status)
    {
      typedef MPITypeInfo<T> Info;
      int sz;
      MPI_Get_count(&status, Info::getType(), &sz);
      assert(sz%Info::size == 0);
      data.resize(sz/Info::size);
    }

    template<typename T>
    void MPI_SendVectorInRing(
      std::vector<T> & data,
      std::vector<T> & next,
      int tag,
      int rightrank,
      int leftrank,
      MPI_Comm comm,
      MPI_Request& r_send,
      MPI_Request& r_recv
      )
    {
      // mpi status stuff
      int result DUNE_UNUSED;
      result = 0;
      typedef MPITypeInfo<T> Info;
      // resize next buffer to maximum size
      next.resize(next.capacity());
      // send data (explicitly send data.size() elements)
      result =
        MPI_Isend(
          data.data(), Info::size*data.size(), Info::getType(), rightrank, tag,
          comm, &r_send);
      // receive up to the maximum size; the actual size is stored in the status
      result =
        MPI_Irecv(
          next.data(), Info::size*next.size(), Info::getType(), leftrank, tag,
          comm, &r_recv);
      // // check result
      // MPI_Status status;
      // CheckMPIStatus(result, status);
    }
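
    /* Usage sketch (illustrative only, not part of the original header):
       rotate one buffer a single step around the ring.  `maxCount`,
       `rightrank`, `leftrank` and `comm` are placeholders.

         std::vector<int> data = ...;        // local payload
         std::vector<int> next(maxCount);    // receive buffer, large enough for any sender
         MPI_Request r_send, r_recv;
         MPI_SendVectorInRing(data, next, 0, rightrank, leftrank, comm, r_send, r_recv);

         MPI_Status s_recv, s_send;
         MPI_Waitall(1, &r_recv, &s_recv);
         MPI_SetVectorSize(next, s_recv);    // shrink to the element count actually received
         MPI_Waitall(1, &r_send, &s_send);
    */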

    template<typename T>
    using ptr_t = T*;

    /* these helper structs are needed as long as we still support
       C++11, as we can't use generic lambdas */
    template<typename... Args>
    struct call_MPI_SendVectorInRing
    {
      std::tuple<Args...> & remotedata;
      std::tuple<Args...> & nextdata;
      int & tag;
      int & rightrank;
      int & leftrank;
      MPI_Comm & mpicomm;
      std::array<MPI_Request,sizeof...(Args)> & requests_send;
      std::array<MPI_Request,sizeof...(Args)> & requests_recv;

      template<typename I>
      void operator()(I i)
      {
        MPI_SendVectorInRing(
          std::get<i>(remotedata),
          std::get<i>(nextdata),
          tag+i,
          rightrank, leftrank, mpicomm,
          requests_send[i],
          requests_recv[i]);
      }
    };

    template<typename... Args>
    struct call_MPI_SetVectorSize
    {
      std::tuple<Args...> & nextdata;
      std::array<MPI_Status,sizeof...(Args)> & status_recv;

      template<typename I>
      void operator()(I i)
      {
        MPI_SetVectorSize(std::get<i>(nextdata),status_recv[i]);
      }
    };

    template<typename OP, std::size_t... Indices, typename... Args>
    void MPI_AllApply_impl(MPI_Comm mpicomm,
                           OP && op,
                           std::index_sequence<Indices...> indices,
                           const Args&... data)
    {
      constexpr std::size_t N = sizeof...(Args);
      int myrank = 0;
      int commsize = 0;
#if HAVE_MPI
      MPI_Comm_rank(mpicomm, &myrank);
      MPI_Comm_size(mpicomm, &commsize);
#endif // HAVE_MPI

      if (commsize > 1)
      {
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
        std::cout << myrank << " Start Communication, size " << commsize << std::endl;
#endif

        // get data sizes
        std::array<unsigned int, N> size({ ((unsigned int)data.size())... });

        // communicate max data size
        std::array<unsigned int, N> maxSize;
        MPI_Allreduce(size.data(), maxSize.data(),
                      size.size(), MPI_UNSIGNED, MPI_MAX, mpicomm);
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
        std::cout << myrank << " maxSize " << "done... " << std::endl;
#endif

        // allocate receiving buffers with maxSize to ensure sufficient buffer size for communication
        std::tuple<Args...> remotedata { Args(maxSize[Indices])... };

        // copy local data to receiving buffer
        remotedata = std::tie(data...);

        // allocate second set of receiving buffers necessary for async communication
        std::tuple<Args...> nextdata { Args(maxSize[Indices])... };

        // communicate data in the ring
        int rightrank = (myrank + 1 + commsize) % commsize;
        int leftrank  = (myrank - 1 + commsize) % commsize;

        std::cout << myrank << ": size = " << commsize << std::endl;
        std::cout << myrank << ": left = " << leftrank
                  << " right = " << rightrank << std::endl;

        // currently the remote data is our own data
        int remoterank = myrank;

        for (int i=1; i<commsize; i++)
        {
          // in this iteration we will receive data from nextrank
          int nextrank = (myrank - i + commsize) % commsize;

          std::cout << myrank << ": next = " << nextrank << std::endl;

          // send remote data to right neighbor and receive from left neighbor
          std::array<MPI_Request,N> requests_send;
          std::array<MPI_Request,N> requests_recv;

          int tag = 12345678;
          Dune::Hybrid::forEach(indices,
            // [&](auto i){
            //   MPI_SendVectorInRing(
            //     std::get<i>(remotedata),
            //     std::get<i>(nextdata),
            //     tag+i,
            //     rightrank, leftrank, mpicomm,
            //     requests_send[i],
            //     requests_recv[i]);
            // });
            call_MPI_SendVectorInRing<Args...>({
                remotedata,
                nextdata,
                tag,
                rightrank, leftrank, mpicomm,
                requests_send,
                requests_recv
              }));

          // apply operator
          op(remoterank,std::get<Indices>(remotedata)...);

          // wait for communication to finish
          std::array<MPI_Status,N> status_send;
          std::array<MPI_Status,N> status_recv;
          MPI_Waitall(N,requests_recv.data(),status_recv.data());

          // we finished receiving from nextrank and thus remoterank = nextrank
          remoterank = nextrank;

          // get current data sizes
          // and resize vectors
          Dune::Hybrid::forEach(indices,
            // [&](auto i){
            //   MPI_SetVectorSize(std::get<i>(nextdata),status_recv[i]);
            // });
            call_MPI_SetVectorSize<Args...>({
                nextdata, status_recv
              }));

          MPI_Waitall(N,requests_send.data(),status_send.data());

          // swap the communication buffers
          std::swap(remotedata,nextdata);
        }

        // last apply (or the only one in the case of sequential application)
        op(remoterank,std::get<Indices>(remotedata)...);
      }
      else // sequential
      {
        op(myrank,data...);
      }
    }

  } // end namespace Impl

template<typename OP, typename... Args>
void MPI_AllApply(MPI_Comm mpicomm,
                  OP && op,
                  const Args& ... data)
{
  Impl::MPI_AllApply_impl(
    mpicomm,
    std::forward<OP>(op),
    std::make_index_sequence<sizeof...(Args)>(),
    data...
    );
}

} // end namespace Parallel
} // end namespace Dune
Dune::Parallel::MPI_AllApply(MPI_Comm mpicomm, OP &&op, const Args &... data)
Apply an operator locally to a distributed data set (defined in ringcomm.hh).
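
A minimal usage sketch (illustrative only: the function name printRemoteSizes and the payload vectors are made up, and an initialized MPI environment is assumed). Each rank passes its local vectors; the callback is then invoked once per rank in the communicator, including the caller itself, with that rank's data.

  #include <iostream>
  #include <vector>

  #include <dune/common/fvector.hh>

  #include "ringcomm.hh"   // this header

  // Report how many entries every rank contributed.
  void printRemoteSizes(MPI_Comm comm,
                        const std::vector<unsigned int>& ids,
                        const std::vector<Dune::FieldVector<double,3> >& coords)
  {
    Dune::Parallel::MPI_AllApply(comm,
      [](int remoterank,
         const std::vector<unsigned int>& remoteIds,
         const std::vector<Dune::FieldVector<double,3> >& remoteCoords)
      {
        // called once for every rank in comm (the local rank included),
        // with that rank's ids and coordinates
        std::cout << "rank " << remoterank << " contributed "
                  << remoteIds.size() << " ids and "
                  << remoteCoords.size() << " coordinates\n";
      },
      ids, coords);
  }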