Alpine3D 20240426.cd14b8b
MPIControl.h
1/***********************************************************************************/
2/* Copyright 2009-2015 WSL Institute for Snow and Avalanche Research SLF-DAVOS */
3/***********************************************************************************/
4/* This file is part of Alpine3D.
5 Alpine3D is free software: you can redistribute it and/or modify
6 it under the terms of the GNU Lesser General Public License as published by
7 the Free Software Foundation, either version 3 of the License, or
8 (at your option) any later version.
9
10 Alpine3D is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public License
16 along with Alpine3D. If not, see <http://www.gnu.org/licenses/>.
17*/
18#ifndef MPIWRAPPER_H
19#define MPIWRAPPER_H
20
21#ifdef ENABLE_MPI
22 #include <mpi.h>
23#endif
24
25#ifdef _OPENMP
26 #include <omp.h>
27#endif
28
29#include <meteoio/MeteoIO.h>
30#include <cstdio>
31
43class MPIControl
44{
45 public:
51 static MPIControl& instance();
52
53 // The following methods refer to OpenMP
58 bool openmp() const;
59
64 size_t thread() const;
65
70 size_t max_threads() const;
71
72 // The following methods refer to MPI
77 bool master() const;
78
83 size_t master_rank() const;
84
89 size_t rank() const;
90
95 size_t size() const;
96
101 std::string name() const;
102
107 void barrier() const;
108
116 void gather(const int& send_value, std::vector<int>& receive_vector, const size_t& root = 0);
117
119
128 void reduce_max(double& value, const bool all=true);
129 void reduce_min(double& value, const bool all=true);
130 void reduce_sum(double& value, const bool all=true);
131 void reduce_sum(int& value, const bool all=true);
133
142 template<class T> static void deserialize(const void* in, const size_t& len, T& obj)
143 {
144 std::stringstream obj_stream;
145 obj_stream.write(reinterpret_cast<const char*>(in), len);
146 obj_stream >> obj;
147 }
148
157 template<class T> static size_t serialize(void** out, const T& obj, const bool alloc=false)
158 {
159 std::stringstream obj_stream;
160 obj_stream << obj;
161 const size_t len = obj_stream.str().size();
162 if (alloc) {
163 *out = new char[len];
164 }
165 obj_stream.read(reinterpret_cast<char*>(*out), len);
166 return len;
167 }
168
169 #ifdef ENABLE_MPI
179 template <class T> static void op_sum_func(void* in, void* out, int* /*len*/, MPI_Datatype* datatype)
180 {
181 int tmp_size;
182 MPI_Type_size(*datatype, &tmp_size);
183
184 T in_obj, out_obj;
185
186 deserialize(in, static_cast<size_t>(tmp_size), in_obj);
187 deserialize(out, static_cast<size_t>(tmp_size), out_obj);
188
189 out_obj += in_obj;
190
191 serialize(&out, out_obj);
192 }
193 #endif
194
195 #ifdef ENABLE_MPI
203 template <class T> void reduce_sum(T& obj, const bool all=true)
204 {
205 if (size_ <= 1) return;
206
207 MPI_Op op;
208 MPI_Op_create(op_sum_func<T>, true, &op);
209
210 void* in_obj_char=NULL;
211 void* out_obj_char=NULL;
212
213 const size_t len = serialize(&in_obj_char, obj, true);
214 out_obj_char = new char[len];
215
216 MPI_Datatype chars_datatype;
217 MPI_Type_contiguous(static_cast<int>(len), MPI_CHAR, &chars_datatype);
218 MPI_Type_commit(&chars_datatype);
219
220 if (all)
221 MPI_Allreduce(in_obj_char, out_obj_char, 1, chars_datatype, op, MPI_COMM_WORLD);
222 else
223 MPI_Reduce(in_obj_char, out_obj_char, 1, chars_datatype, op, master_rank(), MPI_COMM_WORLD);
224 MPI_Op_free(&op);
225
226 if (all || rank() == master_rank()) deserialize(out_obj_char, len, obj);
227
228 delete[] (char*)out_obj_char;
229 delete[] (char*)in_obj_char;
230 }
231 #else
232 template <class T> void reduce_sum(T& /*obj*/, const bool /*all*/=true) {}
233 #endif
234
235 #ifdef ENABLE_MPI
242 template <class T> void broadcast(T& obj, const size_t& root = 0)
243 {
244 if (size_ <= 1) return;
245
246 std::string obj_string;
247
248 if (rank_ == root) { // Build the string that represents the object
249 std::stringstream obj_stream;
250 obj_stream << obj;
251 obj_string.insert(0, obj_stream.str());
252 }
253
254 broadcast(obj_string, root);
255
256 if (rank_ != root) { // Deserialize the object broadcasted
257 std::stringstream obj_stream;
258 obj_stream << obj_string;
259 obj_stream >> obj;
260 }
261 }
262 #else
263 template <class T> void broadcast(T& /*obj*/, const size_t& root=0) {(void)root;}
264 #endif
265
266 #ifdef ENABLE_MPI
274 template <class T> void broadcast(std::vector<T>& vec_obj, const size_t& root = 0)
275 {
276 if (size_ <= 1) return;
277
278 std::string obj_string;
279 size_t vec_size;
280
281 if (rank_ == root) { // Build the string that represents the objects
282 std::stringstream objs_stream;
283 for (size_t ii=0; ii<vec_obj.size(); ii++)
284 objs_stream << vec_obj[ii];
285 obj_string.insert(0, objs_stream.str());
286 vec_size = vec_obj.size();
287 }
288
289 broadcast(vec_size, root);
290 broadcast(obj_string, root);
291
292 if (rank_ != root) { // Deserialize the objects broadcasted
293 vec_obj.clear();
294
295 std::stringstream objs_stream;
296 objs_stream << obj_string;
297
298 T tmp;
299 for (size_t ii=0; ii<vec_size; ii++) {
300 objs_stream >> tmp;
301 vec_obj.push_back(tmp);
302 }
303 }
304 }
305 #else
306 template <class T> void broadcast(std::vector<T>& /*vec_obj*/, const size_t& root=0) {(void)root;}
307 #endif
308
309 #ifdef ENABLE_MPI
320 template <class T> void scatter(std::vector<T*>& vec_local, const size_t& root = 0)
321 {
322 if (size_ <= 1) return;
323
324 if (rank_ == root) {
325 for (size_t ii=1; ii<size_; ii++) { // HACK: Assuming root is master
326 size_t startx, deltax;
327 getArraySliceParams(vec_local.size(), ii, startx, deltax);
328
329 send((int)deltax, ii);
330 for (size_t jj=startx; jj<(startx+deltax); jj++) {
331 std::string obj_string;
332 std::stringstream objs_stream;
333
334 objs_stream << *(vec_local[jj]);
335 obj_string.insert(0, objs_stream.str());
336 send(obj_string, ii);
337 delete vec_local[jj];
338 }
339 }
340
341 size_t startx, deltax;
342 getArraySliceParams(vec_local.size(), startx, deltax);
343 vec_local.resize(deltax);
344 } else {
345 std::string obj_string;
346 int deltax;
347 receive(deltax, root);
348 vec_local.resize((size_t)deltax);
349
350 T obj; // temporary object
351 for (size_t ii=0; ii<vec_local.size(); ii++) {
352 std::stringstream objs_stream;
353 receive(obj_string, root);
354
355 objs_stream << obj_string;
356
357 objs_stream >> obj;
358 vec_local[ii] = new T(obj);
359 }
360 }
361 }
362 #else
363 template <class T> void scatter(std::vector<T*>& /*vec_local*/, const size_t& root=0) {(void)root;}
364 #endif
365
366 #ifdef ENABLE_MPI
374 template <class T> void send(const std::vector<T*>& vec_local, const size_t& destination, const int& tag=0);
375 template <class T> void send(const std::vector<T>& vec_local, const size_t& destination, const int& tag=0);
376 #else
377 template <class T> void send(const std::vector<T*>& /*vec_local*/, const size_t& /*destination*/, const int& tag=0) {(void)tag;}
378 template <class T> void send(const std::vector<T>& /*vec_local*/, const size_t& /*destination*/, const int& tag=0) {(void)tag;}
379 #endif
380
381 #ifdef ENABLE_MPI
389 template <class T> void receive(std::vector<T*>& vec_local, const size_t& source, const int& tag=0);
390 template <class T> void receive(std::vector<T>& vec_local, const size_t& source, const int& tag=0);
391 #else
392 template <class T> void receive(std::vector<T*>& /*vec_local*/, const size_t& /*source*/, const int& tag=0) {(void)tag;}
393 template <class T> void receive(std::vector<T>& /*vec_local*/, const size_t& /*source*/, const int& tag=0) {(void)tag;}
394 #endif
395
396 #ifdef ENABLE_MPI
407 template <class T> void gather(std::vector<T*>& vec_local, const size_t& root = 0)
408 {
409 if (size_ <= 1) return;
410
411 std::vector<int> vec_sizes;
412 size_t sum = vec_local.size(); //not const: reduce_sum() updates it in place
413
414 gather(static_cast<int>(vec_local.size()), vec_sizes); //number of elements on each process
415 reduce_sum(sum, false); //global number of T*, available on the root process
416
417 if (rank_ == root) {
418 std::string obj_string;
419 size_t local_sum = vec_local.size();
420
421 vec_local.resize(sum);
422
423 T obj; //temporary object
424 for (size_t ii=1; ii<vec_sizes.size(); ii++) {//HACK: Assuming master is always rank 0
425 for (size_t jj=0; jj<vec_sizes[ii]; jj++) {
426 std::stringstream objs_stream;
427 receive(obj_string, ii);
428
429 objs_stream << obj_string;
430 objs_stream >> obj;
431 vec_local[local_sum+jj] = new T(obj);
432 }
433 local_sum += vec_sizes[ii];
434 }
435
436 } else {
437 for (size_t ii=0; ii<vec_local.size(); ii++) {
438 std::string obj_string;
439 std::stringstream objs_stream;
440
441 objs_stream << *(vec_local[ii]);
442 obj_string.insert(0, objs_stream.str());
443 send(obj_string, root);
444 }
445 }
446 }
447 #else
448 template <class T> void gather(std::vector<T*>& /*vec_local*/, const size_t& root=0) {(void)root;}
449 #endif
450
458 void getArraySliceParams(const size_t& dimx, size_t& startx_sub, size_t& nx_sub) const {getArraySliceParams(dimx, size_, rank_, startx_sub, nx_sub);}
459 void getArraySliceParams(const size_t& dimx, const size_t& idx_wk, size_t& startx_sub, size_t& nx_sub) const {getArraySliceParams(dimx, size_, idx_wk, startx_sub, nx_sub);}
460
461 void getArraySliceParamsOptim(const size_t& dimx, size_t& startx_sub, size_t& nx_sub,const mio::DEMObject& dem, const mio::Grid2DObject& landuse)
462 {getArraySliceParamsOptim(dimx, rank_, startx_sub, nx_sub, dem, landuse);}
463
464 void getArraySliceParamsOptim(const size_t& dimx, const size_t& idx_wk, size_t& startx_sub, size_t& nx_sub,const mio::DEMObject& dem, const mio::Grid2DObject& landuse);
465
476 static void getArraySliceParams(const size_t& dimx, const size_t& nbworkers, const size_t& idx_wk, size_t& startx_sub, size_t& nx_sub);
477
487 static void getArraySliceParams(const size_t& dimx, const size_t& nbworkers, std::vector<size_t>& offset, std::vector<size_t>& nx);
488
489 private:
490 MPIControl();
491 ~MPIControl();
492
493 MPIControl(MPIControl const&); //No implementation allowed - singleton class
494 void operator=(MPIControl const&); //No implementation allowed - singleton class
495 void broadcast(std::string& message, const size_t& root);
496
497 void send(std::string& message, const size_t& recipient, const int& tag=0);
498 void receive(std::string& message, const size_t& source, const int& tag=0);
499 void send(const int& value, const size_t& recipient, const int& tag=0);
500 void receive(int& value, const size_t& source, const int& tag=0);
501
502 static void checkSuccess(const int& ierr);
503
504 size_t rank_; // the rank of this process
505 size_t size_; // the number of all processes
506 std::string name_; // the name of the node this process is running on
507};
508
509#endif
A singleton class that deals with all aspects of parallelization within Alpine3D (MPI, OpenMP, ...)
Definition: MPIControl.h:44
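For orientation, a minimal usage sketch (not part of the header above) of how the singleton is typically queried; the include path, the function name and the per-process quantity are illustrative assumptions:

    #include <alpine3d/MPIControl.h> // assumed install path of this header
    #include <iostream>

    void report_parallel_setup()
    {
        MPIControl& mpi = MPIControl::instance(); // first call initializes the MPI/OpenMP state

        if (mpi.master())
            std::cout << "Running on " << mpi.size() << " process(es), master on node " << mpi.name() << "\n";

        double local_cost = static_cast<double>(mpi.rank() + 1); // stand-in for a per-process measurement
        mpi.reduce_sum(local_cost); // with all==true (the default) every process obtains the global sum

        mpi.barrier(); // wait for all processes before going on
    }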
std::string name() const
Definition: MPIControl.cc:90
void reduce_min(double &value, const bool all=true)
Definition: MPIControl.cc:217
void scatter(std::vector< T * > &vec_local, const size_t &root=0)
Scatter the objects pointed to by vector<T*> by slices to all processes. Internally no MPI_Scatterv ...
Definition: MPIControl.h:320
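A sketch of the intended scatter/compute/gather round trip. The Cell type and process() function are hypothetical; any default-constructible type that provides the stream operators used by serialize()/deserialize() will do:

    #include <alpine3d/MPIControl.h>
    #include <iostream>
    #include <vector>

    // Hypothetical payload type, streamable so that serialize()/deserialize() can handle it
    struct Cell { double value; };
    std::ostream& operator<<(std::ostream& os, const Cell& c) { return os << c.value << " "; }
    std::istream& operator>>(std::istream& is, Cell& c) { return is >> c.value; }

    void process(Cell& c) { c.value *= 2.; } // placeholder for the real per-object work

    void scatter_gather_roundtrip(std::vector<Cell*>& cells) // fully populated on the root process only
    {
        MPIControl& mpi = MPIControl::instance();

        mpi.scatter(cells); // each process keeps only its slice; the root deletes what it handed out
        for (size_t ii = 0; ii < cells.size(); ii++)
            process(*cells[ii]);
        mpi.gather(cells); // the root collects every (possibly modified) object back into its vector
    }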
void broadcast(std::vector< T > &vec_obj, const size_t &root=0)
Broadcast a vector of class T objects via MPI; if MPI is not activated, do nothing.
Definition: MPIControl.h:274
void getArraySliceParamsOptim(const size_t &dimx, size_t &startx_sub, size_t &nx_sub, const mio::DEMObject &dem, const mio::Grid2DObject &landuse)
Definition: MPIControl.h:461
size_t max_threads() const
Definition: MPIControl.cc:66
size_t thread() const
Definition: MPIControl.cc:57
void barrier() const
Definition: MPIControl.cc:247
void broadcast(T &obj, const size_t &root=0)
Broadcast an object via MPI; if MPI is not activated, do nothing.
Definition: MPIControl.h:242
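A sketch of the usual pattern: compute or read a value on the master only, then broadcast it to everybody. Any type with operator<< and operator>> works; a plain double keeps the example small:

    #include <alpine3d/MPIControl.h>

    void broadcast_threshold()
    {
        MPIControl& mpi = MPIControl::instance();

        double threshold = 0.;
        if (mpi.master())
            threshold = 1.5; // e.g. a value read from the configuration on the master only
        mpi.broadcast(threshold); // afterwards every process holds the master's value
    }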
void gather(const int &send_value, std::vector< int > &receive_vector, const size_t &root=0)
Definition: MPIControl.cc:195
void send(const std::vector< T * > &vec_local, const size_t &destination, const int &tag=0)
Send the objects pointed to by vector<T*> to process #destination.
Definition: MPIControl.cc:299
bool master() const
Definition: MPIControl.cc:95
void reduce_sum(double &value, const bool all=true)
Definition: MPIControl.cc:227
static void getArraySliceParams(const size_t &dimx, const size_t &nbworkers, std::vector< size_t > &offset, std::vector< size_t > &nx)
Returns the parameters for splitting an array into several balanced sub-arrays. This is mostly useful...
void gather(std::vector< T * > &vec_local, const size_t &root=0)
Gathers the objects pointed to by vector<T*> from all processes into a vector<T*> on the root node....
Definition: MPIControl.h:407
size_t size() const
Definition: MPIControl.cc:85
void getArraySliceParams(const size_t &dimx, size_t &startx_sub, size_t &nx_sub) const
Helper to split an array for domain decomposition. Given the overall size of an array, calculate the start index and number of elements of the slice assigned to this process.
Definition: MPIControl.h:458
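A sketch of the resulting domain-decomposition pattern: each process asks for its slice of the x dimension and iterates only over those columns (the loop body is a placeholder):

    #include <alpine3d/MPIControl.h>

    void work_on_my_slice(const size_t& dimx)
    {
        const MPIControl& mpi = MPIControl::instance();

        size_t startx, nx;
        mpi.getArraySliceParams(dimx, startx, nx); // slice assigned to the calling process

        for (size_t ix = startx; ix < startx + nx; ix++) {
            // process column ix of the local sub-domain here
        }
    }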
void getArraySliceParams(const size_t &dimx, const size_t &idx_wk, size_t &startx_sub, size_t &nx_sub) const
Definition: MPIControl.h:459
static void op_sum_func(void *in, void *out, int *, MPI_Datatype *datatype)
Definition: MPIControl.h:179
void receive(std::vector< T * > &vec_local, const size_t &source, const int &tag=0)
Receive a vector of objects from process #source.
Definition: MPIControl.cc:322
static void deserialize(const void *in, const size_t &len, T &obj)
Definition: MPIControl.h:142
void reduce_sum(T &obj, const bool all=true)
Adds up the values of type T from all processes and, if all==true, distributes the sum back to all processes.
Definition: MPIControl.h:203
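In Alpine3D this is typically applied to whole grids; a sketch, under the assumption that mio::Grid2DObject provides the stream operators and the operator+= required by the serialization helpers and by op_sum_func():

    #include <alpine3d/MPIControl.h>
    #include <meteoio/MeteoIO.h>

    // Sum per-process contributions; with all==true every process ends up with the total grid
    void sum_partial_grids(mio::Grid2DObject& partial_sum)
    {
        MPIControl::instance().reduce_sum(partial_sum, true);
    }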
size_t rank() const
Definition: MPIControl.cc:75
size_t master_rank() const
Definition: MPIControl.cc:80
static MPIControl & instance()
Definition: MPIControl.cc:417
void reduce_max(double &value, const bool all=true)
Definition: MPIControl.cc:207
static size_t serialize(void **out, const T &obj, const bool alloc=false)
Definition: MPIControl.h:157
bool openmp() const
Definition: MPIControl.cc:48
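Finally, the OpenMP-related queries follow the same pattern; a minimal sketch:

    #include <alpine3d/MPIControl.h>
    #include <iostream>

    void report_openmp()
    {
        const MPIControl& mpi = MPIControl::instance();

        if (mpi.openmp()) // true when compiled with OpenMP support
            std::cout << "thread " << mpi.thread() << " of " << mpi.max_threads() << " threads\n"; // thread() is 0 outside of a parallel region
        else
            std::cout << "OpenMP support not compiled in\n";
    }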