Open Porous Media (OPM) Project
ParallelRestart.hpp
1 /*
2  Copyright 2019 Equinor AS.
3 
4  This file is part of the Open Porous Media project (OPM).
5 
6  OPM is free software: you can redistribute it and/or modify
7  it under the terms of the GNU General Public License as published by
8  the Free Software Foundation, either version 3 of the License, or
9  (at your option) any later version.
10 
11  OPM is distributed in the hope that it will be useful,
12  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  GNU General Public License for more details.
15 
16  You should have received a copy of the GNU General Public License
17  along with OPM. If not, see <http://www.gnu.org/licenses/>.
18 */
19 #ifndef PARALLEL_RESTART_HPP
20 #define PARALLEL_RESTART_HPP
21 
22 #if HAVE_MPI
23 #include <mpi.h>
24 #endif
25 
26 #include <opm/common/ErrorMacros.hpp>
27 #include <opm/common/utility/TimeService.hpp>
28 
29 #include <dune/common/version.hh>
30 #include <dune/common/parallel/mpihelper.hh>
31 
32 #include <opm/simulators/utils/ParallelCommunication.hpp>
33 
34 #include <chrono>
35 #include <optional>
36 #include <map>
37 #include <set>
38 #include <string>
39 #include <tuple>
40 #include <typeinfo>
41 #include <unordered_map>
42 #include <unordered_set>
43 #include <vector>
44 
45 namespace Opm
46 {
47 
48 class EclipseIO;
49 class SummaryState;
50 class RestartKey;
51 class RestartValue;
52 
53 namespace data
54 {
55 struct AquiferData;
56 struct CarterTracyData;
57 struct CellData;
58 struct Connection;
59 struct CurrentControl;
60 struct FetkovichData;
61 class GroupAndNetworkValues;
62 struct GroupConstraints;
63 struct GroupData;
64 struct GroupGuideRates;
65 class GuideRateValue;
66 struct NodeData;
67 struct NumericAquiferData;
68 class Rates;
69 struct Segment;
70 class Solution;
71 struct Well;
72 class Wells;
73 }
74 
75 namespace Action
76 {
77 class State;
78 }
79 
80 namespace Mpi
81 {
// --- packSize() for raw arrays of l elements ---

//! Non-POD element type: declared only, no generic implementation visible here.
template<class T>
std::size_t packSize(const T*, std::size_t, Opm::Parallel::MPIComm,
                     std::integral_constant<bool, false>);

//! POD element type: size obtainable from the MPI datatype (defined elsewhere).
template<class T>
std::size_t packSize(const T*, std::size_t l, Opm::Parallel::MPIComm comm,
                     std::integral_constant<bool, true>);

//! Presumably dispatches on std::is_pod<T>, like packSize(const T&) below — defined elsewhere.
template<class T>
std::size_t packSize(const T* data, std::size_t l, Opm::Parallel::MPIComm comm);
92 
93 template<class T>
94 std::size_t packSize(const T&, Opm::Parallel::MPIComm,
95  std::integral_constant<bool, false>)
96 {
97  std::string msg = std::string{"Packing not (yet) supported for non-pod type: "} + typeid(T).name();
98  OPM_THROW(std::logic_error, msg);
99 }
100 
101 template<class T>
102 std::size_t packSize(const T&, Opm::Parallel::MPIComm comm,
103  std::integral_constant<bool, true>)
104 {
105 #if HAVE_MPI
106  int size{};
107  MPI_Pack_size(1, Dune::MPITraits<T>::getType(), comm, &size);
108  return size;
109 #else
110  (void) comm;
111  return 0;
112 #endif
113 }
114 
115 template<class T>
116 std::size_t packSize(const T& data, Opm::Parallel::MPIComm comm)
117 {
118  return packSize(data, comm, typename std::is_pod<T>::type());
119 }
120 
// --- packSize() overloads for standard containers and utility types ---
// Each declaration mirrors a pack()/unpack() pair below; definitions are
// not in this header (presumably in the implementation file — confirm).

template<class T1, class T2>
std::size_t packSize(const std::pair<T1,T2>& data, Opm::Parallel::MPIComm comm);

template<class T>
std::size_t packSize(const std::optional<T>& data, Opm::Parallel::MPIComm comm);

template<class T, class A>
std::size_t packSize(const std::vector<T,A>& data, Opm::Parallel::MPIComm comm);

template<class K, class C, class A>
std::size_t packSize(const std::set<K,C,A>& data,
                     Opm::Parallel::MPIComm comm);

template<class T, class H, class KE, class A>
std::size_t packSize(const std::unordered_set<T,H,KE,A>& data,
                     Opm::Parallel::MPIComm comm);

// std::vector<bool> is bit-packed, hence its own overload.
template<class A>
std::size_t packSize(const std::vector<bool,A>& data, Opm::Parallel::MPIComm comm);

template<class... Ts>
std::size_t packSize(const std::tuple<Ts...>& data, Opm::Parallel::MPIComm comm);

template<class T, std::size_t N>
std::size_t packSize(const std::array<T,N>& data, Opm::Parallel::MPIComm comm);

// NUL-terminated C string.
std::size_t packSize(const char* str, Opm::Parallel::MPIComm comm);

template<class T1, class T2, class C, class A>
std::size_t packSize(const std::map<T1,T2,C,A>& data, Opm::Parallel::MPIComm comm);

template<class T1, class T2, class H, class P, class A>
std::size_t packSize(const std::unordered_map<T1,T2,H,P,A>& data, Opm::Parallel::MPIComm comm);
154 
156 
// --- pack() for raw arrays of l elements ---

//! Non-POD element type: declared only, no generic implementation visible here.
template<class T>
void pack(const T*, std::size_t, std::vector<char>&, int&,
          Opm::Parallel::MPIComm, std::integral_constant<bool, false>);

//! POD element type (defined elsewhere).
template<class T>
void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm, std::integral_constant<bool, true>);

//! Presumably dispatches on std::is_pod<T>, like pack(const T&) below — defined elsewhere.
template<class T>
void pack(const T* data, std::size_t l, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm);
168 
169 template<class T>
170 void pack(const T&, std::vector<char>&, int&,
171  Opm::Parallel::MPIComm, std::integral_constant<bool, false>)
172 {
173  OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
174 }
175 
176 template<class T>
177 void pack(const T& data, std::vector<char>& buffer, int& position,
178  Opm::Parallel::MPIComm comm, std::integral_constant<bool, true>)
179 {
180 #if HAVE_MPI
181  MPI_Pack(&data, 1, Dune::MPITraits<T>::getType(), buffer.data(),
182  buffer.size(), &position, comm);
183 #else
184  (void) data;
185  (void) comm;
186  (void) buffer;
187  (void) position;
188 #endif
189 }
190 
191 template<class T>
192 void pack(const T& data, std::vector<char>& buffer, int& position,
193  Opm::Parallel::MPIComm comm)
194 {
195  pack(data, buffer, position, comm, typename std::is_pod<T>::type());
196 }
197 
// --- pack() overloads for standard containers and utility types ---
// Serialize 'data' into 'buffer' starting at 'position'; definitions are
// not in this header (presumably in the implementation file — confirm).

template<class T1, class T2>
void pack(const std::pair<T1,T2>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm);

template<class T>
void pack(const std::optional<T>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm);

template<class T, class A>
void pack(const std::vector<T,A>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm);

// std::vector<bool> is bit-packed, hence its own overload.
template<class A>
void pack(const std::vector<bool,A>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm);

template<class... Ts>
void pack(const std::tuple<Ts...>& data, std::vector<char>& buffer,
          int& position, Opm::Parallel::MPIComm comm);

template<class K, class C, class A>
void pack(const std::set<K,C,A>& data,
          std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm);

template<class T, class H, class KE, class A>
void pack(const std::unordered_set<T,H,KE,A>& data,
          std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm);

template<class T, size_t N>
void pack(const std::array<T,N>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm);

template<class T1, class T2, class C, class A>
void pack(const std::map<T1,T2,C,A>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm);

template<class T1, class T2, class H, class P, class A>
void pack(const std::unordered_map<T1,T2,H,P,A>& data, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm);

// NUL-terminated C string.
void pack(const char* str, std::vector<char>& buffer, int& position,
          Opm::Parallel::MPIComm comm);
244 
// --- unpack() for raw arrays of l elements ---

//! Non-POD element type: declared only, no generic implementation visible here.
template<class T>
void unpack(T*, const std::size_t&, std::vector<char>&, int&,
            Opm::Parallel::MPIComm, std::integral_constant<bool, false>);

//! POD element type (defined elsewhere).
template<class T>
void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm,
            std::integral_constant<bool, true>);

//! Presumably dispatches on std::is_pod<T>, like unpack(T&) below — defined elsewhere.
template<class T>
void unpack(T* data, const std::size_t& l, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm);
257 
258 template<class T>
259 void unpack(T&, std::vector<char>&, int&,
260  Opm::Parallel::MPIComm, std::integral_constant<bool, false>)
261 {
262  OPM_THROW(std::logic_error, "Packing not (yet) supported for this non-pod type.");
263 }
264 
265 template<class T>
266 void unpack(T& data, std::vector<char>& buffer, int& position,
267  Opm::Parallel::MPIComm comm, std::integral_constant<bool, true>)
268 {
269 #if HAVE_MPI
270  MPI_Unpack(buffer.data(), buffer.size(), &position, &data, 1,
271  Dune::MPITraits<T>::getType(), comm);
272 #else
273  (void) data;
274  (void) comm;
275  (void) buffer;
276  (void) position;
277 #endif
278 }
279 
280 template<class T>
281 void unpack(T& data, std::vector<char>& buffer, int& position,
282  Opm::Parallel::MPIComm comm)
283 {
284  unpack(data, buffer, position, comm, typename std::is_pod<T>::type());
285 }
286 
// --- unpack() overloads for standard containers and utility types ---
// Deserialize into 'data' from 'buffer' starting at 'position'; definitions
// are not in this header (presumably in the implementation file — confirm).

template<class T1, class T2>
void unpack(std::pair<T1,T2>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm);

template<class T>
void unpack(std::optional<T>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm);

template<class T, class A>
void unpack(std::vector<T,A>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm);

// std::vector<bool> is bit-packed, hence its own overload.
template<class A>
void unpack(std::vector<bool,A>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm);

template<class... Ts>
void unpack(std::tuple<Ts...>& data, std::vector<char>& buffer,
            int& position, Opm::Parallel::MPIComm comm);

template<class K, class C, class A>
void unpack(std::set<K,C,A>& data,
            std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm);

template<class T, class H, class KE, class A>
void unpack(std::unordered_set<T,H,KE,A>& data,
            std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm);

template<class T, size_t N>
void unpack(std::array<T,N>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm);

template<class T1, class T2, class C, class A>
void unpack(std::map<T1,T2,C,A>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm);

template<class T1, class T2, class H, class P, class A>
void unpack(std::unordered_map<T1,T2,H,P,A>& data, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm);

// C string of exactly 'length' characters.
void unpack(char* str, std::size_t length, std::vector<char>& buffer, int& position,
            Opm::Parallel::MPIComm comm);
333 
//! \brief Declares the packSize/pack/unpack triple for a concrete type T.
//! Declaration-only here; the definitions live outside this header.
#define ADD_PACK_PROTOTYPES(T) \
  std::size_t packSize(const T& data, Opm::Parallel::MPIComm comm); \
  void pack(const T& data, std::vector<char>& buffer, int& position, \
            Opm::Parallel::MPIComm comm); \
  void unpack(T& data, std::vector<char>& buffer, int& position, \
              Opm::Parallel::MPIComm comm);

// Restart-data value types that can be transferred over MPI.
ADD_PACK_PROTOTYPES(data::AquiferData)
ADD_PACK_PROTOTYPES(data::CarterTracyData)
ADD_PACK_PROTOTYPES(data::CellData)
ADD_PACK_PROTOTYPES(data::Connection)
ADD_PACK_PROTOTYPES(data::CurrentControl)
ADD_PACK_PROTOTYPES(data::FetkovichData)
ADD_PACK_PROTOTYPES(data::Rates)
ADD_PACK_PROTOTYPES(data::Segment)
ADD_PACK_PROTOTYPES(data::Solution)
ADD_PACK_PROTOTYPES(data::GuideRateValue)
ADD_PACK_PROTOTYPES(data::GroupConstraints)
ADD_PACK_PROTOTYPES(data::GroupGuideRates)
ADD_PACK_PROTOTYPES(data::GroupData)
ADD_PACK_PROTOTYPES(data::NodeData)
ADD_PACK_PROTOTYPES(data::GroupAndNetworkValues)
ADD_PACK_PROTOTYPES(data::NumericAquiferData)
ADD_PACK_PROTOTYPES(data::Well)
ADD_PACK_PROTOTYPES(data::Wells)
ADD_PACK_PROTOTYPES(RestartKey)
ADD_PACK_PROTOTYPES(RestartValue)
ADD_PACK_PROTOTYPES(std::string)
ADD_PACK_PROTOTYPES(time_point)
363 
364 template<typename T, typename... Args>
365 void variadic_packsize(size_t& size, Parallel::Communication comm, T& first, Args&&... args)
366 {
367  size += packSize(first, comm);
368  if constexpr (sizeof...(args) > 0)
369  variadic_packsize(size, comm, std::forward<Args>(args)...);
370 }
371 
372 template<typename T, typename... Args>
373 void variadic_pack(int& pos, std::vector<char>& buffer, Parallel::Communication comm, T& first, Args&&... args)
374 {
375  pack(first, buffer, pos, comm);
376  if constexpr (sizeof...(args) > 0)
377  variadic_pack(pos, buffer, comm, std::forward<Args>(args)...);
378 }
379 
380 template<typename T, typename... Args>
381 void variadic_unpack(int& pos, std::vector<char>& buffer, Parallel::Communication comm, T& first, Args&&... args)
382 {
383  unpack(first, buffer, pos, comm);
384  if constexpr (sizeof...(args) > 0)
385  variadic_unpack(pos, buffer, comm, std::forward<Args>(args)...);
386 }
387 
#if HAVE_MPI
//! \brief Broadcast a set of objects from rank \p root to all ranks of \p comm.
//!
//! Protocol: root computes the total packed size, the size is broadcast,
//! every rank allocates a buffer of that size, root packs and broadcasts
//! the buffer, and all other ranks unpack into their arguments.
template<typename... Args>
void broadcast(Parallel::Communication comm, int root, Args&&... args)
{
    // Single-rank communicator: nothing to exchange.
    if (comm.size() == 1)
        return;

    size_t size = 0;
    if (comm.rank() == root)
        variadic_packsize(size, comm, std::forward<Args>(args)...);

    // Non-root ranks learn the buffer size from root.
    comm.broadcast(&size, 1, root);
    std::vector<char> buffer(size);
    if (comm.rank() == root) {
        int pos = 0;
        variadic_pack(pos, buffer, comm, std::forward<Args>(args)...);
    }
    comm.broadcast(buffer.data(), size, root);
    if (comm.rank() != root) {
        int pos = 0;
        variadic_unpack(pos, buffer, comm, std::forward<Args>(args)...);
    }
}
#else
//! \brief Serial build: broadcasting is a no-op.
template<typename... Args>
void broadcast(Parallel::Communication, int, Args&&...)
{}
#endif
416 
417 } // end namespace Mpi
418 
//! \brief Load restart data and make it available on all ranks of \p comm.
//! NOTE(review): declaration only — presumably eclIO performs the actual file
//! I/O on one rank and the result is distributed; confirm in the implementation.
RestartValue loadParallelRestart(const EclipseIO* eclIO, Action::State& actionState, SummaryState& summaryState,
                                 const std::vector<RestartKey>& solutionKeys,
                                 const std::vector<RestartKey>& extraKeys,
                                 Parallel::Communication comm);
423 
424 } // end namespace Opm
425 #endif // PARALLEL_RESTART_HPP
This file declares MPI pack/unpack serialization helpers (packSize / pack / unpack / broadcast) and the loadParallelRestart() entry point used for parallel restart.
Definition: ParallelRestart.hpp