parallel.cpp

///////////////////////////////////////////////////////////////////////////////////////
/// \file parallel.cpp
/// \brief Functionality for parallel computation
///
/// \author Joe Siltberg
/// $Date: 2013-10-10 10:20:33 +0200 (Thu, 10 Oct 2013) $
///
///////////////////////////////////////////////////////////////////////////////////////

// mpi.h needs to be the first include (or specifically, included before stdio.h),
// due to a bug in the MPI-2 standard (both stdio.h and the C++ MPI API
// define SEEK_SET, SEEK_CUR and SEEK_END).
#ifdef HAVE_MPI
#include <mpi.h>
#endif

#include "config.h"
#include "parallel.h"
#include "shell.h"
#include <memory>
#include <string>

namespace GuessParallel {

bool parallel = false;

#ifdef HAVE_MPI

/// A class whose only purpose is to terminate the MPI library when deleted
class FinalizeCaller {
public:
    ~FinalizeCaller() {
        MPI_Finalize();
    }
};

/// The auto pointer will delete the object some time after main() has finished
std::auto_ptr<FinalizeCaller> destructor;

#endif

void init(int& argc, char**& argv) {
#ifdef HAVE_MPI
    // Only initialize MPI if the command line explicitly asks for a
    // parallel run (with the -parallel option).
    // In most cases it wouldn't hurt to initialize MPI even if
    // it's not used, but apparently some implementations start
    // by going to the user's home directory if the program isn't
    // launched by an mpiexec/mpirun launcher.
    //
    // Unfortunately, since MPI initialization must be done before
    // we parse our options with CommandLineArguments, we need to
    // look for the -parallel option here ourselves.
    // The file-global variable parallel is initialized to false above.
    for (int i = 0; i < argc; ++i) {
        if (std::string(argv[i]) == "-parallel") {
            parallel = true;
            break;
        }
    }

    // ecev3 - Spinup?
    bool spinup = false;
    for (int i = 0; i < argc; ++i) {
        if (std::string(argv[i]) == "-islpjgspinup") {
            spinup = true;
            break;
        }
    }

    if (parallel && (spinup || ECEARTHWITHCRUNCEP)) {
        // We won't use OASIS-MCT in this case, so we must initialise MPI ourselves.
        dprintf("Parallel spinup of LPJ_GUESS - calling MPI_Init\n");
        int ierr = MPI_Init(&argc, &argv);
        dprintf("LPJ_GUESS returned from calling MPI_Init with code %i\n", ierr);

        // Make sure MPI_Finalize is called at program termination
        destructor = std::auto_ptr<FinalizeCaller>(new FinalizeCaller());
    }
#endif
}
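
/// Returns this process' rank within MPI_COMM_WORLD (always 0 in non-MPI builds)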
int get_rank() {
#ifdef HAVE_MPI
    int rank;
    int ierr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    return rank;
#else
    return 0;
#endif
}
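
/// Returns this process' rank within the communicator passed as the integer
/// handle lc (converted with MPI_Comm_f2c under Open MPI); 0 in non-MPI builds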
int get_rank_specific(int lc) {
#ifdef HAVE_MPI
    int rank;
#ifdef OPEN_MPI
    MPI_Comm comm = MPI_Comm_f2c((MPI_Fint) lc);
#else
    int comm = lc;
#endif
    //dprintf("LPJ_GUESS - get_rank_specific() with communicator %i\n", lc);
    int ierr = MPI_Comm_rank(comm, &rank);
    //dprintf("LPJ_GUESS - get_rank_specific() returned error: %i and rank %i\n", ierr, rank);
    return rank;
#else
    return 0;
#endif
}
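
/// Returns this process' rank within MPI_COMM_WORLD (0 in non-MPI builds)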
int get_global_rank() {
#ifdef HAVE_MPI
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    return rank;
#else
    return 0;
#endif
}
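
/// Returns the number of processes in MPI_COMM_WORLD, or 1 if this is not a
/// parallel run (or a non-MPI build)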
int get_num_processes() {
#ifdef HAVE_MPI
    if (parallel) {
        int size;
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        return size;
    }
    else {
        return 1;
    }
#else
    return 1;
#endif
}
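
/// Returns the number of processes in the communicator passed as the integer
/// handle lc (1 in non-MPI builds)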
int get_num_local_processes(int lc) {
#ifdef HAVE_MPI
    int size;
#ifdef OPEN_MPI
    MPI_Comm comm = MPI_Comm_f2c((MPI_Fint) lc);
#else
    int comm = lc;
#endif
    MPI_Comm_size(comm, &size);
    return size;
#else
    return 1;
#endif
}
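
/// Returns the number of processes in MPI_COMM_WORLD (1 in non-MPI builds)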
int get_num_global_processes() {
#ifdef HAVE_MPI
    int size;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    return size;
#else
    return 1;
#endif
}

} // namespace GuessParallel
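
// A minimal usage sketch (hypothetical driver, not part of this file): the
// calling framework is assumed to invoke init() before parsing its own options,
// so that MPI_Init sees the unmodified argc/argv, and to use get_rank() and
// get_num_processes() afterwards to partition work:
//
//     int main(int argc, char** argv) {
//         GuessParallel::init(argc, argv);               // must run before option parsing
//         const int rank  = GuessParallel::get_rank();   // 0 in a serial run
//         const int nproc = GuessParallel::get_num_processes();
//         // ... distribute gridcells over ranks [0, nproc) ...
//     }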