Actual source code: mpimatmatmult.c

  2: /*
  3:   Defines matrix-matrix product routines for pairs of MPIAIJ matrices
  4:           C = A * B
  5: */
  6: #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
  7: #include <../src/mat/utils/freespace.h>
  8: #include <../src/mat/impls/aij/mpi/mpiaij.h>
  9: #include <petscbt.h>
 10: #include <../src/mat/impls/dense/mpi/mpidense.h>
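/*
   Illustrative usage sketch (not part of this file): how a caller might form C = A*B for two
   assembled MPIAIJ matrices using the routines defined below.  The names A, B, C and the
   fill estimate 2.0 are assumptions for the example only; error checking is omitted.

     Mat C;
     MatMatMult(A,B,MAT_INITIAL_MATRIX,2.0,&C);    first call: symbolic and numeric product
     ... change numerical values of A and/or B, keeping the same nonzero pattern ...
     MatMatMult(A,B,MAT_REUSE_MATRIX,2.0,&C);      reuse the symbolic data attached to C
     MatDestroy(&C);
*/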

 14: PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill, Mat *C)
 15: {

 19:   if (scall == MAT_INITIAL_MATRIX){
 20:     MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);/* numeric product is computed as well */
 21:   } else if (scall == MAT_REUSE_MATRIX){
 22:     MatMatMultNumeric_MPIAIJ_MPIAIJ(A,B,*C);
 23:   } else {
 24:     SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
 25:   }
 26:   return(0);
 27: }

 31: PetscErrorCode PetscContainerDestroy_Mat_MatMatMultMPI(void *ptr)
 32: {
 33:   PetscErrorCode       ierr;
 34:   Mat_MatMatMultMPI    *mult=(Mat_MatMatMultMPI*)ptr;

 37:   PetscFree2(mult->startsj,mult->startsj_r);
 38:   PetscFree(mult->bufa);
 39:   ISDestroy(&mult->isrowa);
 40:   ISDestroy(&mult->isrowb);
 41:   ISDestroy(&mult->iscolb);
 42:   MatDestroy(&mult->C_seq);
 43:   MatDestroy(&mult->A_loc);
 44:   MatDestroy(&mult->B_seq);
 45:   MatDestroy(&mult->B_loc);
 46:   MatDestroy(&mult->B_oth);
 47:   PetscFree(mult->abi);
 48:   PetscFree(mult->abj);
 49:   PetscFree(mult);
 50:   return(0);
 51: }

 56: PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
 57: {
 58:   PetscErrorCode     ierr;
 59:   PetscContainer     container;
 60:   Mat_MatMatMultMPI  *mult=PETSC_NULL;

 63:   PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);
 64:   if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
 65:   PetscContainerGetPointer(container,(void **)&mult);
 66:   A->ops->destroy   = mult->destroy;
 67:   A->ops->duplicate = mult->duplicate;
 68:   if (A->ops->destroy) {
 69:     (*A->ops->destroy)(A);
 70:   }
 71:   PetscObjectCompose((PetscObject)A,"Mat_MatMatMultMPI",0);
 72:   return(0);
 73: }

 77: PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A, MatDuplicateOption op, Mat *M)
 78: {
 79:   PetscErrorCode     ierr;
 80:   Mat_MatMatMultMPI  *mult;
 81:   PetscContainer     container;

 84:   PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);
 85:   if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
 86:   PetscContainerGetPointer(container,(void **)&mult);
 87:   /* Note: the container is not duplicated, because it requires deep copying of
 88:      several large data sets (see PetscContainerDestroy_Mat_MatMatMultMPI()).
 89:      These data sets are only used for repeated calling of MatMatMultNumeric().
 90:      *M is unlikely to be used this way, so we create *M in plain MPIAIJ format */
 91:   (*mult->duplicate)(A,op,M);
 92:   (*M)->ops->destroy   = mult->destroy;   /* = MatDestroy_MPIAIJ, *M doesn't duplicate A's container! */
 93:   (*M)->ops->duplicate = mult->duplicate; /* = MatDuplicate_MPIAIJ */
 94:   return(0);
 95: }

 99: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
100: {
101:   PetscErrorCode     ierr;
102:   PetscInt           start,end;
103:   Mat_MatMatMultMPI  *mult;
104:   PetscContainer     container;

107:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend){
108:     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D,%D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
109:   }
110:   PetscNew(Mat_MatMatMultMPI,&mult);

 112:   /* create a seq matrix B_seq = submatrix of B formed by the rows of B that correspond to the nonzero columns of A */
113:   MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&mult->isrowb,&mult->iscolb,&mult->brstart,&mult->B_seq);

 115:   /* create a seq matrix A_loc = submatrix of A consisting of all local rows of A */
116:   start = A->rmap->rstart; end = A->rmap->rend;
117:   ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&mult->isrowa);
118:   MatMPIAIJGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,&mult->isrowa,&mult->isrowb,&mult->A_loc);

120:   /* compute C_seq = A_seq * B_seq */
121:   MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_INITIAL_MATRIX,fill,&mult->C_seq);

 123:   /* create mpi matrix C by concatenating C_seq */
124:   PetscObjectReference((PetscObject)mult->C_seq); /* prevent C_seq being destroyed by MatMerge() */
125:   MatMerge(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,MAT_INITIAL_MATRIX,C);

127:   /* attach the supporting struct to C for reuse of symbolic C */
128:   PetscContainerCreate(PETSC_COMM_SELF,&container);
129:   PetscContainerSetPointer(container,mult);
130:   PetscContainerSetUserDestroy(container,PetscContainerDestroy_Mat_MatMatMultMPI);
131:   PetscObjectCompose((PetscObject)(*C),"Mat_MatMatMultMPI",(PetscObject)container);
132:   PetscContainerDestroy(&container);
133:   mult->destroy   = (*C)->ops->destroy;
134:   mult->duplicate = (*C)->ops->duplicate;
135:   (*C)->ops->destroy   = MatDestroy_MPIAIJ_MatMatMult;
136:   (*C)->ops->duplicate = MatDuplicate_MPIAIJ_MatMatMult;
137:   return(0);
138: }
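/*
   A minimal sketch (for orientation only) of the PetscContainer pattern used above to attach
   the Mat_MatMatMultMPI data to C so it can be reused by MatMatMultNumeric_MPIAIJ_MPIAIJ().
   MyCtx and MyCtxDestroy are hypothetical names for illustration; the real code uses
   Mat_MatMatMultMPI and PetscContainerDestroy_Mat_MatMatMultMPI.

     typedef struct { PetscInt *work; } MyCtx;

     PetscErrorCode MyCtxDestroy(void *ptr)
     {
       MyCtx *ctx = (MyCtx*)ptr;
       PetscFree(ctx->work);
       PetscFree(ctx);
       return(0);
     }

     attach:
       PetscContainer container;
       MyCtx          *ctx;
       PetscNew(MyCtx,&ctx);
       PetscContainerCreate(PETSC_COMM_SELF,&container);
       PetscContainerSetPointer(container,ctx);
       PetscContainerSetUserDestroy(container,MyCtxDestroy);
       PetscObjectCompose((PetscObject)C,"MyCtx",(PetscObject)container);
       PetscContainerDestroy(&container);            C now holds the only reference

     recover later:
       PetscObjectQuery((PetscObject)C,"MyCtx",(PetscObject*)&container);
       PetscContainerGetPointer(container,(void**)&ctx);
*/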

140: /* This routine is called ONLY in the case of reusing previously computed symbolic C */
143: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat B,Mat C)
144: {
145:   PetscErrorCode     ierr;
146:   Mat                *seq;
147:   Mat_MatMatMultMPI  *mult;
148:   PetscContainer     container;

151:   PetscObjectQuery((PetscObject)C,"Mat_MatMatMultMPI",(PetscObject *)&container);
152:   if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
153:   PetscContainerGetPointer(container,(void **)&mult);

155:   seq = &mult->B_seq;
156:   MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_REUSE_MATRIX,&seq);
157:   mult->B_seq = *seq;

159:   seq = &mult->A_loc;
160:   MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_REUSE_MATRIX,&seq);
161:   mult->A_loc = *seq;

163:   MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_REUSE_MATRIX,0.0,&mult->C_seq);

165:   PetscObjectReference((PetscObject)mult->C_seq);
166:   MatMerge(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,MAT_REUSE_MATRIX,&C);
167:   return(0);
168: }

172: PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
173: {

177:   if (scall == MAT_INITIAL_MATRIX){
178:     MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);
179:   }
180:   MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);
181:   return(0);
182: }

184: typedef struct {
185:   Mat         workB;
186:   PetscScalar *rvalues,*svalues;
187:   MPI_Request *rwaits,*swaits;
188: } MPIAIJ_MPIDense;
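/*
   Fields of MPIAIJ_MPIDense (descriptive note):
     workB   - sequential dense matrix holding the off-process rows of B needed for the local product
     rvalues - receive buffer: B->cmap->N values for each row of B received from another process
     svalues - send buffer: B->cmap->N values for each locally owned row of B sent to another process
     rwaits  - one MPI receive request per process we receive from
     swaits  - one MPI send request per process we send to
*/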

192: PetscErrorCode MPIAIJ_MPIDenseDestroy(void *ctx)
193: {
194:   MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*) ctx;
195:   PetscErrorCode  ierr;

198:   MatDestroy(&contents->workB);
199:   PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);
200:   PetscFree(contents);
201:   return(0);
202: }

206: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
207: {
208:   PetscErrorCode         ierr;
209:   Mat_MPIAIJ             *aij = (Mat_MPIAIJ*) A->data;
210:   PetscInt               nz = aij->B->cmap->n;
211:   PetscContainer         container;
212:   MPIAIJ_MPIDense        *contents;
213:   VecScatter             ctx = aij->Mvctx;
214:   VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
 215:   VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;
216:   PetscInt               m=A->rmap->n,n=B->cmap->n;

219:   MatCreate(((PetscObject)B)->comm,C);
220:   MatSetSizes(*C,m,n,A->rmap->N,B->cmap->N);
221:   MatSetType(*C,MATMPIDENSE);
222:   MatAssemblyBegin(*C,MAT_FINAL_ASSEMBLY);
223:   MatAssemblyEnd(*C,MAT_FINAL_ASSEMBLY);

225:   PetscNew(MPIAIJ_MPIDense,&contents);
226:   /* Create work matrix used to store off processor rows of B needed for local product */
227:   MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,PETSC_NULL,&contents->workB);
 228:   /* Create work arrays: the receive and send buffers hold B->cmap->N values for each incoming/outgoing row of B, plus one MPI request per neighbor process in each direction */
229:   PetscMalloc4(B->cmap->N*from->starts[from->n],PetscScalar,&contents->rvalues,
230:                       B->cmap->N*to->starts[to->n],PetscScalar,&contents->svalues,
231:                       from->n,MPI_Request,&contents->rwaits,
232:                       to->n,MPI_Request,&contents->swaits);

234:   PetscContainerCreate(((PetscObject)A)->comm,&container);
235:   PetscContainerSetPointer(container,contents);
236:   PetscContainerSetUserDestroy(container,MPIAIJ_MPIDenseDestroy);
237:   PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)container);
238:   PetscContainerDestroy(&container);
239:   return(0);
240: }

244: /*
245:     Performs an efficient scatter on the rows of B needed by this process; this is
246:     a modification of the VecScatterBegin_() routines.
247: */
248: PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
249: {
250:   Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
251:   PetscErrorCode         ierr;
252:   PetscScalar            *b,*w,*svalues,*rvalues;
253:   VecScatter             ctx = aij->Mvctx;
254:   VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
 255:   VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;
256:   PetscInt               i,j,k;
257:   PetscInt               *sindices,*sstarts,*rindices,*rstarts;
258:   PetscMPIInt            *sprocs,*rprocs,nrecvs;
259:   MPI_Request            *swaits,*rwaits;
260:   MPI_Comm               comm = ((PetscObject)A)->comm;
261:   PetscMPIInt            tag = ((PetscObject)ctx)->tag,ncols = B->cmap->N, nrows = aij->B->cmap->n,imdex,nrowsB = B->rmap->n;
262:   MPI_Status             status;
263:   MPIAIJ_MPIDense        *contents;
264:   PetscContainer         container;
265:   Mat                    workB;

268:   PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&container);
269:   if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
270:   PetscContainerGetPointer(container,(void**)&contents);

272:   workB = *outworkB = contents->workB;
273:   if (nrows != workB->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap->n,nrows);
274:   sindices  = to->indices;
275:   sstarts   = to->starts;
276:   sprocs    = to->procs;
277:   swaits    = contents->swaits;
278:   svalues   = contents->svalues;

280:   rindices  = from->indices;
281:   rstarts   = from->starts;
282:   rprocs    = from->procs;
283:   rwaits    = contents->rwaits;
284:   rvalues   = contents->rvalues;

286:   MatGetArray(B,&b);
287:   MatGetArray(workB,&w);

289:   for (i=0; i<from->n; i++) {
290:     MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);
291:   }

293:   for (i=0; i<to->n; i++) {
 294:     /* pack a message at a time: b is stored column-major with leading dimension nrowsB, so row sindices[sstarts[i]+j], column k is b[sindices[sstarts[i]+j] + nrowsB*k] */
295:     CHKMEMQ;
296:     for (j=0; j<sstarts[i+1]-sstarts[i]; j++){
297:       for (k=0; k<ncols; k++) {
298:         svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
299:       }
300:     }
301:     CHKMEMQ;
302:     MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);
303:   }

305:   nrecvs = from->n;
306:   while (nrecvs) {
307:     MPI_Waitany(from->n,rwaits,&imdex,&status);
308:     nrecvs--;
 310:     /* unpack a message at a time: workB's array w is column-major with leading dimension nrows */
310:     CHKMEMQ;
311:     for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++){
312:       for (k=0; k<ncols; k++) {
313:         w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
314:       }
315:     }
316:     CHKMEMQ;
317:   }
318:   if (to->n) {MPI_Waitall(to->n,swaits,to->sstatus);}

320:   MatRestoreArray(B,&b);
321:   MatRestoreArray(workB,&w);
322:   MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);
323:   MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);
324:   return(0);
325: }
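/*
   Communication pattern of MatMPIDenseScatter() above (descriptive summary): post one
   nonblocking receive per sending process, pack the needed rows of B column by column into
   svalues and post one nonblocking send per destination process, then use MPI_Waitany() to
   unpack each incoming message into workB as it arrives, and finally wait on all sends.
   The row indices and process lists are reused from the VecScatter aij->Mvctx that MPIAIJ
   already builds for matrix-vector products.
*/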

330: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
331: {
332:   PetscErrorCode       ierr;
333:   Mat_MPIAIJ           *aij = (Mat_MPIAIJ*)A->data;
334:   Mat_MPIDense         *bdense = (Mat_MPIDense*)B->data;
335:   Mat_MPIDense         *cdense = (Mat_MPIDense*)C->data;
336:   Mat                  workB;


340:   /* diagonal block of A times all local rows of B*/
341:   MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);

343:   /* get off processor parts of B needed to complete the product */
344:   MatMPIDenseScatter(A,B,C,&workB);

346:   /* off-diagonal block of A times nonlocal rows of B */
347:   MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);
348:   MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
349:   MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
350:   return(0);
351: }
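/*
   The split computed in MatMatMultNumeric_MPIAIJ_MPIDense() above, written out (informal
   summary): with the local row block of A partitioned as [A_d  A_o], where A_d contains the
   columns owned by this process and A_o the remaining columns, and with B_loc the locally
   owned rows of B, the locally owned rows of C are

       C_loc = A_d * B_loc + A_o * workB

   where workB holds the off-process rows of B gathered by MatMPIDenseScatter().
*/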