#include "Minuit2/MPIProcess.h"

#include <iostream>
#include <vector>

namespace ROOT {

namespace Minuit2 {

// global (static) information on the MPI environment
unsigned int MPIProcess::fgGlobalSize = 1;
unsigned int MPIProcess::fgGlobalRank = 0;

// cartesian topology of the processes: fgCartDimension = fgCartSizeX*fgCartSizeY
unsigned int MPIProcess::fgCartSizeX = 0;
unsigned int MPIProcess::fgCartSizeY = 0;
unsigned int MPIProcess::fgCartDimension = 0;
bool MPIProcess::fgNewCart = true;

#ifdef MPIPROC
// communicator in use and bookkeeping for the (at most two) nesting levels
MPI::Intracomm* MPIProcess::fgCommunicator = 0;
int MPIProcess::fgIndexComm = -1; // -1: no communicator assigned yet
MPI::Intracomm* MPIProcess::fgCommunicators[2] = {0};
unsigned int MPIProcess::fgIndecesComm[2] = {0};
#endif

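// The constructor assigns this instance one of (at most) two communicators,
// depending on the declared cartesian topology and on which of the two
// nesting levels (indexComm = 0 or 1) is requested.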
MPIProcess::MPIProcess(unsigned int nelements, unsigned int indexComm) :
   fNelements(nelements), fSize(1), fRank(0)
{
   // normalize the requested communicator index: valid values are 0 and 1
   indexComm = (indexComm==0) ? 0 : 1;

#ifdef MPIPROC

   StartMPI();

   if (fgGlobalSize==fgCartDimension &&
       fgCartSizeX!=fgCartDimension && fgCartSizeY!=fgCartDimension) {
      // declare a genuine 2D cartesian topology

      if (fgCommunicator==0 && fgIndexComm<0 && fgNewCart) {
         // first call: split COMM_WORLD into the two grid communicators
         std::cout << "Info --> MPIProcess::MPIProcess: Declare cartesian Topology ("
                   << fgCartSizeX << "x" << fgCartSizeY << ")" << std::endl;

         int color = fgGlobalRank / fgCartSizeY; // row index in the grid
         int key = fgGlobalRank % fgCartSizeY;   // column index in the grid

         fgCommunicators[0] = new MPI::Intracomm(MPI::COMM_WORLD.Split(key,color)); // ranks in the same column
         fgCommunicators[1] = new MPI::Intracomm(MPI::COMM_WORLD.Split(color,key)); // ranks in the same row

         fgNewCart = false;

      }

      fgIndexComm++;

      if (fgIndexComm>1 || fgCommunicator==(&(MPI::COMM_WORLD))) {
         // no more than 2 dimensions are supported in the topology
         std::cerr << "Error --> MPIProcess::MPIProcess: Requiring more than 2 dimensions in the topology!" << std::endl;
         MPI::COMM_WORLD.Abort(-1);
      }

      // if the second level (indexComm==1) is requested on the first call,
      // use all processes for it
      if (((unsigned int)fgIndexComm)<indexComm)
         fgCommunicator = &(MPI::COMM_WORLD);
      else {
         fgIndecesComm[fgIndexComm] = indexComm;
         fgCommunicator = fgCommunicators[fgIndecesComm[fgIndexComm]];
      }

   }
   else {
      // no cartesian topology declared: fall back to a 1D layout

      if (fgCartDimension!=0 && fgGlobalSize!=fgCartDimension) {
         std::cout << "Warning --> MPIProcess::MPIProcess: Cartesian dimension doesn't correspond to # total procs!" << std::endl;
         std::cout << "Warning --> MPIProcess::MPIProcess: Ignoring topology, use all procs for X." << std::endl;
         std::cout << "Warning --> MPIProcess::MPIProcess: Resetting topology..." << std::endl;
         fgCartSizeX = fgGlobalSize;
         fgCartSizeY = 1;
         fgCartDimension = fgGlobalSize;
      }

      if (fgIndexComm<0) {
         // first call: attach COMM_WORLD to the dimension spanning all processes
         if (fgCartSizeX==fgCartDimension) {
            fgCommunicators[0] = &(MPI::COMM_WORLD);
            fgCommunicators[1] = 0;
         }
         else {
            fgCommunicators[0] = 0;
            fgCommunicators[1] = &(MPI::COMM_WORLD);
         }
      }

      fgIndexComm++;

      if (fgIndexComm>1) {
         // no more than 2 nested calls are supported
         std::cerr << "Error --> MPIProcess::MPIProcess: More than 2 nested MPI calls!" << std::endl;
         MPI::COMM_WORLD.Abort(-1);
      }

      fgIndecesComm[fgIndexComm] = indexComm;

      // two nested calls requested, but only one communicator is available:
      // keep the first and ignore the second request
      if (fgCommunicator!=0 && fgCommunicators[indexComm]!=0) {
         std::cout << "Warning --> MPIProcess::MPIProcess: Requiring 2 nested MPI calls!" << std::endl;
         std::cout << "Warning --> MPIProcess::MPIProcess: Ignoring second call." << std::endl;
         fgIndecesComm[fgIndexComm] = (indexComm==0) ? 1 : 0;
      }

      fgCommunicator = fgCommunicators[fgIndecesComm[fgIndexComm]];

   }

   // cache size and rank within the communicator assigned to this instance
   if (fgCommunicator!=0) {
      fSize = fgCommunicator->Get_size();
      fRank = fgCommunicator->Get_rank();
   }
   else {
      // no communicator: run serially
      fSize = 1;
      fRank = 0;
   }

   if (fSize>fNelements) {
      std::cerr << "Error --> MPIProcess::MPIProcess: more processors than elements!" << std::endl;
      MPI::COMM_WORLD.Abort(-1);
   }

#endif

   // base number of elements per process, plus the remainder to be
   // distributed among the ranks (see NumElements4Job)
   fNumElements4JobIn = fNelements / fSize;
   fNumElements4JobOut = fNelements % fSize;

}
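
// Destruction is expected to happen in reverse order of construction (LIFO):
// each destructor pops one nesting level and, if an outer level remains,
// reinstates its communicator.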

MPIProcess::~MPIProcess()
{
#ifdef MPIPROC
   fgCommunicator = 0;
   fgIndexComm--;
   if (fgIndexComm==0)
      fgCommunicator = fgCommunicators[fgIndecesComm[fgIndexComm]];

#endif

}
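
// SyncVector: each process has filled its own slice
// [StartElementIndex(), EndElementIndex()) of the vector; gather all slices
// so that every process ends up with the complete vector.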

bool MPIProcess::SyncVector(ROOT::Minuit2::MnAlgebraicVector &mnvector)
{
   // nothing to synchronize with fewer than 2 processes
   if (fSize<2)
      return false;

   if (mnvector.size()!=fNelements) {
      std::cerr << "Error --> MPIProcess::SyncVector: # defined elements different from # requested elements!" << std::endl;
      std::cerr << "Error --> MPIProcess::SyncVector: no MPI synchronization is possible!" << std::endl;
      exit(-1);
   }

#ifdef MPIPROC
   unsigned int numElements4ThisJob = NumElements4Job(fRank);
   unsigned int startElementIndex = StartElementIndex();
   unsigned int endElementIndex = EndElementIndex();

   // copy the local slice into a contiguous buffer
   // (std::vector rather than a variable-length array, which is not standard C++)
   std::vector<double> dvectorJob(numElements4ThisJob);
   for(unsigned int i = startElementIndex; i<endElementIndex; i++)
      dvectorJob[i-startElementIndex] = mnvector(i);

   // gather every slice on every process
   std::vector<double> dvector(fNelements);
   MPISyncVector(&dvectorJob[0],numElements4ThisJob,&dvector[0]);

   // copy the synchronized values back into the Minuit2 vector
   for (unsigned int i = 0; i<fNelements; i++) {
      mnvector(i) = dvector[i];
   }

   return true;

#else

   std::cerr << "Error --> MPIProcess::SyncVector: no MPI synchronization is possible!" << std::endl;
   exit(-1);

#endif

}
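
// SyncSymMatrixOffDiagonal: synchronize only the strict upper triangle (x<y)
// of a symmetric matrix. The off-diagonal elements are enumerated row by row
// by a flat index i; (i+offsetVect) maps back to coordinates (x,y), with
// offsetVect accumulating, at the start of each row, the entries at or below
// the diagonal that the enumeration skips.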

bool MPIProcess::SyncSymMatrixOffDiagonal(ROOT::Minuit2::MnAlgebraicSymMatrix &mnmatrix)
{
   // nothing to synchronize with fewer than 2 processes
   if (fSize<2)
      return false;

   // fNelements must match the number of off-diagonal elements
   if (mnmatrix.size()-mnmatrix.Nrow()!=fNelements) {
      std::cerr << "Error --> MPIProcess::SyncSymMatrixOffDiagonal: # defined elements different from # requested elements!" << std::endl;
      std::cerr << "Error --> MPIProcess::SyncSymMatrixOffDiagonal: no MPI synchronization is possible!" << std::endl;
      exit(-1);
   }

#ifdef MPIPROC
   unsigned int numElements4ThisJob = NumElements4Job(fRank);
   unsigned int startElementIndex = StartElementIndex();
   unsigned int endElementIndex = EndElementIndex();
   unsigned int nrow = mnmatrix.Nrow();

   // replay the index mapping up to startElementIndex to get the right offset
   unsigned int offsetVect = 0;
   for (unsigned int i = 0; i<startElementIndex; i++)
      if ((i+offsetVect)%(nrow-1)==0) offsetVect += (i+offsetVect)/(nrow-1);

   // pack the local slice of off-diagonal elements into a contiguous buffer
   std::vector<double> dvectorJob(numElements4ThisJob);
   for(unsigned int i = startElementIndex; i<endElementIndex; i++) {

      int x = (i+offsetVect)/(nrow-1);
      if ((i+offsetVect)%(nrow-1)==0) offsetVect += x;
      int y = (i+offsetVect)%(nrow-1)+1;

      dvectorJob[i-startElementIndex] = mnmatrix(x,y);

   }

   // gather every slice on every process
   std::vector<double> dvector(fNelements);
   MPISyncVector(&dvectorJob[0],numElements4ThisJob,&dvector[0]);

   // unpack the synchronized values back into the matrix
   offsetVect = 0;
   for (unsigned int i = 0; i<fNelements; i++) {

      int x = (i+offsetVect)/(nrow-1);
      if ((i+offsetVect)%(nrow-1)==0) offsetVect += x;
      int y = (i+offsetVect)%(nrow-1)+1;

      mnmatrix(x,y) = dvector[i];

   }

   return true;

#else

   std::cerr << "Error --> MPIProcess::SyncSymMatrixOffDiagonal: no MPI synchronization is possible!" << std::endl;
   exit(-1);

#endif

}
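
// MPISyncVector: concatenate each rank's local buffer (svector elements at
// ivector) into ovector on every rank, using Allgatherv with per-rank counts
// and displacements derived from NumElements4Job.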

#ifdef MPIPROC
void MPIProcess::MPISyncVector(double *ivector, int svector, double *ovector)
{
   // per-rank receive counts and displacements
   std::vector<int> nconts(fSize);
   std::vector<int> offsets(fSize);
   nconts[0] = NumElements4Job(0);
   offsets[0] = 0;
   for (unsigned int i = 1; i<fSize; i++) {
      nconts[i] = NumElements4Job(i);
      offsets[i] = nconts[i-1] + offsets[i-1];
   }

   fgCommunicator->Allgatherv(ivector,svector,MPI::DOUBLE,
                              ovector,&nconts[0],&offsets[0],MPI::DOUBLE);

}

bool MPIProcess::SetCartDimension(unsigned int dimX, unsigned int dimY)
{
   if (fgCommunicator!=0 || fgIndexComm>=0) {
      std::cout << "Warning --> MPIProcess::SetCartDimension: MPIProcess already declared! Ignoring command..." << std::endl;
      return false;
   }
   if (dimX*dimY==0) {
      std::cout << "Warning --> MPIProcess::SetCartDimension: Invalid topology! Ignoring command..." << std::endl;
      return false;
   }

   StartMPI();

   if (fgGlobalSize!=dimX*dimY) {
      std::cout << "Warning --> MPIProcess::SetCartDimension: Cartesian dimension doesn't correspond to # total procs!" << std::endl;
      std::cout << "Warning --> MPIProcess::SetCartDimension: Ignoring command..." << std::endl;
      return false;
   }

   if (fgCartSizeX!=dimX || fgCartSizeY!=dimY) {
      fgCartSizeX = dimX; fgCartSizeY = dimY;
      fgCartDimension = fgCartSizeX * fgCartSizeY;
      fgNewCart = true;

      // drop communicators built for a previous topology
      if (fgCommunicators[0]!=0 && fgCommunicators[1]!=0) {
         delete fgCommunicators[0]; fgCommunicators[0] = 0; fgIndecesComm[0] = 0;
         delete fgCommunicators[1]; fgCommunicators[1] = 0; fgIndecesComm[1] = 0;
      }
   }

   return true;

}
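
// SetDoFirstMPICall: choose a degenerate 1D topology, assigning all processes
// either to the first nesting level (fgGlobalSize x 1) or to the second
// (1 x fgGlobalSize).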

bool MPIProcess::SetDoFirstMPICall(bool doFirstMPICall)
{
   StartMPI();

   bool ret;
   if (doFirstMPICall)
      ret = SetCartDimension(fgGlobalSize,1);
   else
      ret = SetCartDimension(1,fgGlobalSize);

   return ret;

}

#endif

#ifdef MPIPROC
// global dummy object: its destructor runs at program exit, terminating MPI
MPITerminate dummyMPITerminate = MPITerminate();
#endif

} // namespace Minuit2

} // namespace ROOT