// dune-common 2.2.0 -- mpicollectivecommunication.hh
// (Source listing recovered from the Doxygen documentation of this file;
//  embedded documentation line numbers have been stripped.)
1 // -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 // vi: set et ts=4 sw=2 sts=2:
3 #ifndef DUNE_MPICOLLECTIVECOMMUNICATION_HH
4 #define DUNE_MPICOLLECTIVECOMMUNICATION_HH
5 
11 #include<iostream>
12 #include<complex>
13 #include<algorithm>
14 #include<functional>
15 
16 #include"exceptions.hh"
18 #include"binaryfunctions.hh"
19 #include"shared_ptr.hh"
20 #include"mpitraits.hh"
21 
22 #if HAVE_MPI
23 // MPI header
24 #include<mpi.h>
25 
26 namespace Dune
27 {
28 
29  //=======================================================
30  // use singleton pattern and template specialization to
31  // generate MPI operations
32  //=======================================================
33 
34  template<typename Type, typename BinaryFunction>
36  {
37 
38  public:
39  static MPI_Op get ()
40  {
41  if (!op)
42  {
43  op = shared_ptr<MPI_Op>(new MPI_Op);
44  MPI_Op_create((void (*)(void*, void*, int*, MPI_Datatype*))&operation,true,op.get());
45  }
46  return *op;
47  }
48  private:
49  static void operation (Type *in, Type *inout, int *len, MPI_Datatype *dptr)
50  {
51  BinaryFunction func;
52 
53  for (int i=0; i< *len; ++i, ++in, ++inout){
54  Type temp;
55  temp = func(*in, *inout);
56  *inout = temp;
57  }
58  }
59  Generic_MPI_Op () {}
60  Generic_MPI_Op (const Generic_MPI_Op& ) {}
61  static shared_ptr<MPI_Op> op;
62  };
63 
64 
65  template<typename Type, typename BinaryFunction>
66  shared_ptr<MPI_Op> Generic_MPI_Op<Type,BinaryFunction>::op = shared_ptr<MPI_Op>(static_cast<MPI_Op*>(0));
67 
// Map a (type, functor) pair directly onto a builtin MPI operation by fully
// specializing Generic_MPI_Op, bypassing the MPI_Op_create machinery above.
// 'op' is one of MPI_SUM / MPI_PROD / MPI_MIN / MPI_MAX below.
#define ComposeMPIOp(type,func,op) \
  template<> \
  class Generic_MPI_Op<type, func<type> >{ \
  public:\
    static MPI_Op get(){ \
      return op; \
    } \
  private:\
    Generic_MPI_Op () {}\
    Generic_MPI_Op (const Generic_MPI_Op& ) {}\
  }
79 
80 
  // --- builtin sums: map std::plus<T> to MPI_SUM for all arithmetic types ---
  ComposeMPIOp(char, std::plus, MPI_SUM);
  ComposeMPIOp(unsigned char, std::plus, MPI_SUM);
  ComposeMPIOp(short, std::plus, MPI_SUM);
  ComposeMPIOp(unsigned short, std::plus, MPI_SUM);
  ComposeMPIOp(int, std::plus, MPI_SUM);
  ComposeMPIOp(unsigned int, std::plus, MPI_SUM);
  ComposeMPIOp(long, std::plus, MPI_SUM);
  ComposeMPIOp(unsigned long, std::plus, MPI_SUM);
  ComposeMPIOp(float, std::plus, MPI_SUM);
  ComposeMPIOp(double, std::plus, MPI_SUM);
  ComposeMPIOp(long double, std::plus, MPI_SUM);

  // --- builtin products: map std::multiplies<T> to MPI_PROD ---
  ComposeMPIOp(char, std::multiplies, MPI_PROD);
  ComposeMPIOp(unsigned char, std::multiplies, MPI_PROD);
  ComposeMPIOp(short, std::multiplies, MPI_PROD);
  ComposeMPIOp(unsigned short, std::multiplies, MPI_PROD);
  ComposeMPIOp(int, std::multiplies, MPI_PROD);
  ComposeMPIOp(unsigned int, std::multiplies, MPI_PROD);
  ComposeMPIOp(long, std::multiplies, MPI_PROD);
  ComposeMPIOp(unsigned long, std::multiplies, MPI_PROD);
  ComposeMPIOp(float, std::multiplies, MPI_PROD);
  ComposeMPIOp(double, std::multiplies, MPI_PROD);
  ComposeMPIOp(long double, std::multiplies, MPI_PROD);

  // --- builtin minima: map Dune's Min<T> (binaryfunctions.hh) to MPI_MIN ---
  ComposeMPIOp(char, Min, MPI_MIN);
  ComposeMPIOp(unsigned char, Min, MPI_MIN);
  ComposeMPIOp(short, Min, MPI_MIN);
  ComposeMPIOp(unsigned short, Min, MPI_MIN);
  ComposeMPIOp(int, Min, MPI_MIN);
  ComposeMPIOp(unsigned int, Min, MPI_MIN);
  ComposeMPIOp(long, Min, MPI_MIN);
  ComposeMPIOp(unsigned long, Min, MPI_MIN);
  ComposeMPIOp(float, Min, MPI_MIN);
  ComposeMPIOp(double, Min, MPI_MIN);
  ComposeMPIOp(long double, Min, MPI_MIN);

  // --- builtin maxima: map Dune's Max<T> (binaryfunctions.hh) to MPI_MAX ---
  ComposeMPIOp(char, Max, MPI_MAX);
  ComposeMPIOp(unsigned char, Max, MPI_MAX);
  ComposeMPIOp(short, Max, MPI_MAX);
  ComposeMPIOp(unsigned short, Max, MPI_MAX);
  ComposeMPIOp(int, Max, MPI_MAX);
  ComposeMPIOp(unsigned int, Max, MPI_MAX);
  ComposeMPIOp(long, Max, MPI_MAX);
  ComposeMPIOp(unsigned long, Max, MPI_MAX);
  ComposeMPIOp(float, Max, MPI_MAX);
  ComposeMPIOp(double, Max, MPI_MAX);
  ComposeMPIOp(long double, Max, MPI_MAX);

  // The macro is only needed for the specializations above.
#undef ComposeMPIOp
130 
131 
132  //=======================================================
133  // use singleton pattern and template specialization to
134  // generate MPI operations
135  //=======================================================
136 
140  template<>
141  class CollectiveCommunication<MPI_Comm>
142  {
143  public:
145  CollectiveCommunication (const MPI_Comm& c)
146  : communicator(c)
147  {
148  if(communicator!=MPI_COMM_NULL){
149  MPI_Comm_rank(communicator,&me);
150  MPI_Comm_size(communicator,&procs);
151  }else{
152  procs=0;
153  me=-1;
154  }
155  }
156 
158  int rank () const
159  {
160  return me;
161  }
162 
164  int size () const
165  {
166  return procs;
167  }
168 
170  template<typename T>
171  T sum (T& in) const // MPI does not know about const :-(
172  {
173  T out;
174  allreduce<std::plus<T> >(&in,&out,1);
175  return out;
176  }
177 
179  template<typename T>
180  int sum (T* inout, int len) const
181  {
182  return allreduce<std::plus<T> >(inout,len);
183  }
184 
186  template<typename T>
187  T prod (T& in) const // MPI does not know about const :-(
188  {
189  T out;
190  allreduce<std::multiplies<T> >(&in,&out,1);
191  return out;
192  }
193 
195  template<typename T>
196  int prod (T* inout, int len) const
197  {
198  return allreduce<std::plus<T> >(inout,len);
199  }
200 
202  template<typename T>
203  T min (T& in) const // MPI does not know about const :-(
204  {
205  T out;
206  allreduce<Min<T> >(&in,&out,1);
207  return out;
208  }
209 
211  template<typename T>
212  int min (T* inout, int len) const
213  {
214  return allreduce<Min<T> >(inout,len);
215  }
216 
217 
219  template<typename T>
220  T max (T& in) const // MPI does not know about const :-(
221  {
222  T out;
223  allreduce<Max<T> >(&in,&out,1);
224  return out;
225  }
226 
228  template<typename T>
229  int max (T* inout, int len) const
230  {
231  return allreduce<Max<T> >(inout,len);
232  }
233 
235  int barrier () const
236  {
237  return MPI_Barrier(communicator);
238  }
239 
241  template<typename T>
242  int broadcast (T* inout, int len, int root) const
243  {
244  return MPI_Bcast(inout,len,MPITraits<T>::getType(),root,communicator);
245  }
246 
248  template<typename T>
249  int gather (T* in, T* out, int len, int root) const // note out must have space for P*len elements
250  {
251  return MPI_Gather(in,len,MPITraits<T>::getType(),
252  out,len,MPITraits<T>::getType(),
253  root,communicator);
254  }
255 
257  template<typename T>
258  int scatter (T* send, T* recv, int len, int root) const // note out must have space for P*len elements
259  {
260  return MPI_Scatter(send,len,MPITraits<T>::getType(),
261  recv,len,MPITraits<T>::getType(),
262  root,communicator);
263  }
264 
265  operator MPI_Comm () const
266  {
267  return communicator;
268  }
269 
271  template<typename T, typename T1>
272  int allgather(T* sbuf, int count, T1* rbuf) const
273  {
274  return MPI_Allgather(sbuf, count, MPITraits<T>::getType(),
275  rbuf, count, MPITraits<T1>::getType(),
276  communicator);
277  }
278 
280  template<typename BinaryFunction, typename Type>
281  int allreduce(Type* inout, int len) const
282  {
283  Type* out = new Type[len];
284  int ret = allreduce<BinaryFunction>(inout,out,len);
285  std::copy(out, out+len, inout);
286  delete[] out;
287  return ret;
288  }
289 
291  template<typename BinaryFunction, typename Type>
292  int allreduce(Type* in, Type* out, int len) const
293  {
294  return MPI_Allreduce(in, out, len, MPITraits<Type>::getType(),
296  }
297 
298  private:
299  MPI_Comm communicator;
300  int me;
301  int procs;
302  };
303 } // namespace dune
304 
305 #endif
306 #endif