152 #ifndef GETFEM_CONFIG_H__ 153 #define GETFEM_CONFIG_H__ 163 #ifndef GETFEM_PARA_LEVEL 164 # define GETFEM_PARA_LEVEL 0 167 #define GETFEM_MPI_INIT(argc, argv) {GMM_TRACE1("Running sequential Getfem");} 168 #define GETFEM_MPI_FINALIZE {} 170 #if defined(GETFEM_HAVE_DMUMPS_C_H) 171 # ifndef GMM_USES_MUMPS 172 # define GMM_USES_MUMPS 180 # undef GMM_TRACE_MSG_MPI 181 # define GMM_TRACE_MSG_MPI \ 182 int mip_rk__; MPI_Comm_rank(MPI_COMM_WORLD, &mip_rk__); \ 185 # undef GETFEM_MPI_INIT 186 # define GETFEM_MPI_INIT(argc, argv) { \ 187 MPI_Init(&argc, &argv); \ 188 GMM_TRACE1("Running parallelized Getfem level " << GETFEM_PARA_LEVEL); \ 190 # undef GETFEM_MPI_FINALIZE 191 # define GETFEM_MPI_FINALIZE { MPI_Finalize(); } 196 #define MUMPS_PARA_SOLVER 1 197 #define SCHWARZADD_PARA_SOLVER 2 199 # ifndef GETFEM_PARA_SOLVER 200 # define GETFEM_PARA_SOLVER MUMPS_PARA_SOLVER 203 # if GETFEM_PARA_SOLVER == MUMPS_PARA_SOLVER 204 # ifndef GMM_USES_MUMPS 205 # define GMM_USES_MUMPS 219 using std::endl;
using std::cout;
using std::cerr;
220 using std::ends;
using std::cin;
223 #if GETFEM_PARA_LEVEL > 1 224 template <
typename T>
inline T MPI_SUM_SCALAR(T a) {
225 T b; MPI_Allreduce(&a,&b,1,gmm::mpi_type(a), MPI_SUM, MPI_COMM_WORLD);
229 template <
typename VECT>
inline void MPI_SUM_VECTOR(
const VECT &VV) {
230 VECT &V =
const_cast<VECT &
>(VV);
231 typedef typename gmm::linalg_traits<VECT>::value_type T;
232 std::vector<T> W(gmm::vect_size(V));
233 MPI_Allreduce((
void *)(&(V[0])), &(W[0]), gmm::vect_size(V),
234 gmm::mpi_type(T()), MPI_SUM, MPI_COMM_WORLD);
238 template <
typename VECT>
inline void MPI_MAX_VECTOR(
const VECT &VV) {
239 VECT &V =
const_cast<VECT &
>(VV);
240 typedef typename gmm::linalg_traits<VECT>::value_type T;
241 std::vector<T> W(gmm::vect_size(V));
242 MPI_Allreduce((
void *)(&(V[0])), &(W[0]), gmm::vect_size(V),
243 gmm::mpi_type(T()), MPI_MAX, MPI_COMM_WORLD);
247 template <
typename T>
void MPI_BCAST0_SCALAR(T &a) {
248 MPI_Bcast((
void *)(&a), 1, gmm::mpi_type(a), 0, MPI_COMM_WORLD);
251 template <
typename VECT>
inline void MPI_BCAST0_VECTOR(
const VECT &VV) {
252 VECT &V =
const_cast<VECT &
>(VV);
253 typedef typename gmm::linalg_traits<VECT>::value_type T;
254 MPI_Bcast((
void *)(&(V[0])), gmm::vect_size(V), gmm::mpi_type(T()), 0,
258 template <
typename VECT1,
typename VECT2>
259 inline void MPI_SUM_VECTOR(
const VECT1 &VV,
const VECT2 &WW) {
260 VECT1 &V =
const_cast<VECT1 &
>(VV);
261 VECT2 &W =
const_cast<VECT2 &
>(WW);
262 typedef typename gmm::linalg_traits<VECT1>::value_type T;
263 MPI_Allreduce((
void *)(&(V[0])), &(W[0]),
264 gmm::vect_size(V), gmm::mpi_type(T()),
265 MPI_SUM, MPI_COMM_WORLD);
268 inline bool MPI_IS_MASTER(
void)
269 {
int rk; MPI_Comm_rank(MPI_COMM_WORLD, &rk);
return !rk; }
271 template <
typename T>
inline 273 typedef typename gmm::col_matrix< gmm::rsvector<T> > MAT;
274 MAT &MM =
const_cast<MAT &
>(M);
277 MPI_Comm_rank(MPI_COMM_WORLD, &rk);
278 MPI_Comm_size(MPI_COMM_WORLD, &nbp);
280 size_type nr = gmm::mat_nrows(MM), nc = gmm::mat_ncols(MM);
282 gmm::dense_matrix<int> all_nnz(nc, nbp);
283 std::vector<int> my_nnz(nc), owner(nc);
288 my_nnz[i] = gmm::nnz(gmm::mat_col(MM, i));
291 MPI_Allgather((
void *)(&(my_nnz[0])), nc, MPI_INT,
292 (
void *)(&(all_nnz(0,0))), nc, MPI_INT, MPI_COMM_WORLD);
295 std::vector<int> contributors(nc);
296 for (
int i = 0; i < nc; ++i) {
298 int column_master = -1, max_nnz = 0;
299 contributors.resize(0);
302 for (
int j = nbp-1; j >= 0; --j) {
304 if (rk != j) contributors.push_back(j);
305 if (column_master == -1 || all_nnz(i, j) > max_nnz)
306 { column_master = j; max_nnz = all_nnz(i, j); }
310 if (column_master == rk) {
311 for (
int j = 0; j < int(contributors.size()); ++j) {
312 typename gmm::rsvector<T>::base_type_ &VV = V;
313 int si = all_nnz(i, contributors[j]);
315 MPI_Recv((
void *)(&(VV[0])),
316 si*
sizeof(gmm::elt_rsvector_<T>),
317 MPI_BYTE, contributors[j], 0,
318 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
319 gmm::add(V, gmm::mat_col(MM, i));
322 typename gmm::rsvector<T>::base_type_ &VV = MM.col(i);
323 MPI_Send((
void *)(&(VV[0])),
324 my_nnz[i]*
sizeof(gmm::elt_rsvector_<T>),
325 MPI_BYTE, column_master, 0,
334 my_nnz[i] = gmm::nnz(gmm::mat_col(MM, i));
335 owner[i] = (my_nnz[i]) ? rk : 0;
337 MPI_SUM_VECTOR(my_nnz);
338 MPI_SUM_VECTOR(owner);
345 typename gmm::rsvector<T>::base_type_ &VV = MM.col(i);
346 if (owner[i] != rk) VV.resize(my_nnz[i]);
347 MPI_Bcast((
void *)(&(VV[0])), my_nnz[i]*
sizeof(gmm::elt_rsvector_<T>),
348 MPI_BYTE, owner[i], MPI_COMM_WORLD);
353 template <
typename MAT>
inline void MPI_SUM_SPARSE_MATRIX(
const MAT &M) {
354 typedef typename gmm::linalg_traits<MAT>::value_type T;
355 int nbp; MPI_Comm_size(MPI_COMM_WORLD, &nbp);
357 size_type nr = gmm::mat_nrows(M), nc = gmm::mat_ncols(M);
358 gmm::col_matrix< gmm::rsvector<T> > MM(nr, nc);
359 GMM_WARNING2(
"MPI_SUM_SPARSE_MATRIX: A copy of the matrix is done. " 360 "To avoid it, adapt MPI_SUM_SPARSE_MATRIX to " 361 "your matrix type.");
363 MPI_SUM_SPARSE_MATRIX(MM);
364 gmm::copy(MM, const_cast<MAT &>(M));
367 template <
typename T>
inline T MPI_SUM_SCALAR(T a) {
return a; }
368 template <
typename VECT>
inline void MPI_SUM_VECTOR(
const VECT &) {}
369 template <
typename VECT>
inline void MPI_MAX_VECTOR(
const VECT &) {}
370 template <
typename T>
void MPI_BCAST0_SCALAR(T &a) {}
371 template <
typename VECT>
inline void MPI_BCAST0_VECTOR(
const VECT &) {}
372 template <
typename MAT>
inline void MPI_SUM_SPARSE_MATRIX(
const MAT &) {}
373 template <
typename VECT1,
typename VECT2>
374 inline void MPI_SUM_VECTOR(
const VECT1 &V,
const VECT2 &WW)
376 VECT2 &W =
const_cast<VECT2 &
>(WW);
379 inline bool MPI_IS_MASTER(
void) {
return true; }
384 using bgeot::dim_type;
387 using bgeot::scalar_type;
388 using bgeot::complex_type;
389 using bgeot::long_scalar_type;
390 using bgeot::opt_long_scalar_type;
393 using bgeot::base_vector;
394 using bgeot::base_complex_vector;
395 using bgeot::base_matrix;
396 using bgeot::base_complex_matrix;
397 using bgeot::base_tensor;
398 using bgeot::base_complex_tensor;
402 #if defined(__GNUC__) 405 inline bool isnan(scalar_type x) {
return x != x; }
// Doxygen cross-reference residue from the documentation extraction
// (kept as a comment so it cannot be parsed as code):
//  - Multivariate polynomials (bgeot_poly.h).
//  - size_t size_type: used as the common size type in the library.
//  - Sparse vector built upon std::vector (gmm::rsvector).
//  - GetFEM: GEneric Tool for Finite Element Methods.
//  - gmm::uint16_type short_type: used as the common short integer type
//    in the library.
//  - Tensor class, used in mat_elem computations (bgeot_tensor.h).
//  - Defines and typedefs for namespace bgeot (bgeot_config.h).
//  - SuperLU interface for getfem (getfem_superlu.h).