Actual source code: mpidense.c
2: /*
3:    Basic functions for parallel dense matrices.
4: */
6:
7: #include <../src/mat/impls/dense/mpi/mpidense.h> /*I "petscmat.h" I*/
8: #if defined(PETSC_HAVE_PLAPACK)
9: static PetscMPIInt Plapack_nprows,Plapack_npcols,Plapack_ierror,Plapack_nb_alg;
10: static MPI_Comm Plapack_comm_2d;
12: #include <PLA.h>
15: typedef struct {
16: PLA_Obj A,pivots;
17: PLA_Template templ;
18: MPI_Datatype datatype;
19: PetscInt nb,rstart;
20: VecScatter ctx;
21: IS is_pla,is_petsc;
22: PetscBool pla_solved;
23: MatStructure mstruct;
24: } Mat_Plapack;
25: #endif
29: /*@
31:       MatDenseGetLocalMatrix - For a MATMPIDENSE or MATSEQDENSE matrix, returns the sequential
32:               matrix that stores the local portion of the operator. For a sequential matrix it returns the matrix itself.
34: Input Parameter:
35: . A - the Seq or MPI dense matrix
37: Output Parameter:
38: . B - the inner matrix
40: Level: intermediate
42: @*/
43: PetscErrorCode MatDenseGetLocalMatrix(Mat A,Mat *B)
44: {
45: Mat_MPIDense *mat = (Mat_MPIDense*)A->data;
47: PetscBool flg;
50: PetscTypeCompare((PetscObject)A,MATMPIDENSE,&flg);
51: if (flg) {
52: *B = mat->A;
53: } else {
54: *B = A;
55: }
56: return(0);
57: }
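/*
   Editorial note: the sketch below is not part of the original source.  It shows one
   plausible use of MatDenseGetLocalMatrix(): obtaining the per-process MATSEQDENSE
   block of a parallel dense matrix and touching its column-major storage.  The matrix
   A and the use of MatGetArray() are assumptions consistent with the API in this file.
*/
#if 0
static PetscErrorCode ExampleUseLocalDenseBlock(Mat A)
{
  Mat         Aloc;
  PetscScalar *v;

  MatDenseGetLocalMatrix(A,&Aloc);   /* Aloc is A itself for MATSEQDENSE, mat->A for MATMPIDENSE */
  MatGetArray(Aloc,&v);              /* column-major array holding this process's rows */
  /* ... read or modify v here ... */
  MatRestoreArray(Aloc,&v);
  return(0);
}
#endif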
61: PetscErrorCode MatGetRow_MPIDense(Mat A,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
62: {
63: Mat_MPIDense *mat = (Mat_MPIDense*)A->data;
65: PetscInt lrow,rstart = A->rmap->rstart,rend = A->rmap->rend;
68: if (row < rstart || row >= rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"only local rows");
69: lrow = row - rstart;
70: MatGetRow(mat->A,lrow,nz,(const PetscInt **)idx,(const PetscScalar **)v);
71: return(0);
72: }
76: PetscErrorCode MatRestoreRow_MPIDense(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
77: {
81: if (idx) {PetscFree(*idx);}
82: if (v) {PetscFree(*v);}
83: return(0);
84: }
89: PetscErrorCode MatGetDiagonalBlock_MPIDense(Mat A,Mat *a)
90: {
91: Mat_MPIDense *mdn = (Mat_MPIDense*)A->data;
93: PetscInt m = A->rmap->n,rstart = A->rmap->rstart;
94: PetscScalar *array;
95: MPI_Comm comm;
96: Mat B;
99: if (A->rmap->N != A->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only square matrices supported.");
101: PetscObjectQuery((PetscObject)A,"DiagonalBlock",(PetscObject*)&B);
102: if (!B) {
103: PetscObjectGetComm((PetscObject)(mdn->A),&comm);
104: MatCreate(comm,&B);
105: MatSetSizes(B,m,m,m,m);
106: MatSetType(B,((PetscObject)mdn->A)->type_name);
107: MatGetArray(mdn->A,&array);
108: MatSeqDenseSetPreallocation(B,array+m*rstart);
109: MatRestoreArray(mdn->A,&array);
110: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
111: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
112: PetscObjectCompose((PetscObject)A,"DiagonalBlock",(PetscObject)B);
113: *a = B;
114: MatDestroy(&B);
115: } else {
116: *a = B;
117: }
118: return(0);
119: }
124: PetscErrorCode MatSetValues_MPIDense(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
125: {
126: Mat_MPIDense *A = (Mat_MPIDense*)mat->data;
128: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend,row;
129: PetscBool roworiented = A->roworiented;
133: for (i=0; i<m; i++) {
134: if (idxm[i] < 0) continue;
135: if (idxm[i] >= mat->rmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
136: if (idxm[i] >= rstart && idxm[i] < rend) {
137: row = idxm[i] - rstart;
138: if (roworiented) {
139: MatSetValues(A->A,1,&row,n,idxn,v+i*n,addv);
140: } else {
141: for (j=0; j<n; j++) {
142: if (idxn[j] < 0) continue;
143: if (idxn[j] >= mat->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
144: MatSetValues(A->A,1,&row,1,&idxn[j],v+i+j*m,addv);
145: }
146: }
147: } else {
148: if (!A->donotstash) {
149: if (roworiented) {
150: MatStashValuesRow_Private(&mat->stash,idxm[i],n,idxn,v+i*n,PETSC_FALSE);
151: } else {
152: MatStashValuesCol_Private(&mat->stash,idxm[i],n,idxn,v+i,m,PETSC_FALSE);
153: }
154: }
155: }
156: }
157: return(0);
158: }
162: PetscErrorCode MatGetValues_MPIDense(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
163: {
164: Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
166: PetscInt i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend,row;
169: for (i=0; i<m; i++) {
170: if (idxm[i] < 0) continue; /* SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row"); */
171: if (idxm[i] >= mat->rmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
172: if (idxm[i] >= rstart && idxm[i] < rend) {
173: row = idxm[i] - rstart;
174: for (j=0; j<n; j++) {
175: if (idxn[j] < 0) continue; /* SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column"); */
176: if (idxn[j] >= mat->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
177: MatGetValues(mdn->A,1,&row,1,&idxn[j],v+i*n+j);
178: }
179: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
180: }
181: return(0);
182: }
186: PetscErrorCode MatGetArray_MPIDense(Mat A,PetscScalar *array[])
187: {
188: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
192: MatGetArray(a->A,array);
193: return(0);
194: }
198: static PetscErrorCode MatGetSubMatrix_MPIDense(Mat A,IS isrow,IS iscol,MatReuse scall,Mat *B)
199: {
200: Mat_MPIDense *mat = (Mat_MPIDense*)A->data,*newmatd;
201: Mat_SeqDense *lmat = (Mat_SeqDense*)mat->A->data;
203: PetscInt i,j,rstart,rend,nrows,ncols,Ncols,nlrows,nlcols;
204: const PetscInt *irow,*icol;
205: PetscScalar *av,*bv,*v = lmat->v;
206: Mat newmat;
207: IS iscol_local;
210: ISAllGather(iscol,&iscol_local);
211: ISGetIndices(isrow,&irow);
212: ISGetIndices(iscol_local,&icol);
213: ISGetLocalSize(isrow,&nrows);
214: ISGetLocalSize(iscol,&ncols);
215: ISGetSize(iscol,&Ncols); /* global number of columns, size of iscol_local */
217:   /* No parallel redistribution currently supported! Should really check each index set
218:      to confirm that it is OK.  ... Currently supports only submatrices with the same row
219:      partitioning as the original matrix! */
221: MatGetLocalSize(A,&nlrows,&nlcols);
222: MatGetOwnershipRange(A,&rstart,&rend);
223:
224: /* Check submatrix call */
225: if (scall == MAT_REUSE_MATRIX) {
226: /* SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Reused submatrix wrong size"); */
227: /* Really need to test rows and column sizes! */
228: newmat = *B;
229: } else {
230: /* Create and fill new matrix */
231: MatCreate(((PetscObject)A)->comm,&newmat);
232: MatSetSizes(newmat,nrows,ncols,PETSC_DECIDE,Ncols);
233: MatSetType(newmat,((PetscObject)A)->type_name);
234: MatMPIDenseSetPreallocation(newmat,PETSC_NULL);
235: }
237: /* Now extract the data pointers and do the copy, column at a time */
238: newmatd = (Mat_MPIDense*)newmat->data;
239: bv = ((Mat_SeqDense *)newmatd->A->data)->v;
240:
241: for (i=0; i<Ncols; i++) {
242: av = v + ((Mat_SeqDense *)mat->A->data)->lda*icol[i];
243: for (j=0; j<nrows; j++) {
244: *bv++ = av[irow[j] - rstart];
245: }
246: }
248: /* Assemble the matrices so that the correct flags are set */
249: MatAssemblyBegin(newmat,MAT_FINAL_ASSEMBLY);
250: MatAssemblyEnd(newmat,MAT_FINAL_ASSEMBLY);
252: /* Free work space */
253: ISRestoreIndices(isrow,&irow);
254: ISRestoreIndices(iscol_local,&icol);
255: ISDestroy(&iscol_local);
256: *B = newmat;
257: return(0);
258: }
262: PetscErrorCode MatRestoreArray_MPIDense(Mat A,PetscScalar *array[])
263: {
265: return(0);
266: }
270: PetscErrorCode MatAssemblyBegin_MPIDense(Mat mat,MatAssemblyType mode)
271: {
272: Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
273: MPI_Comm comm = ((PetscObject)mat)->comm;
275: PetscInt nstash,reallocs;
276: InsertMode addv;
279: /* make sure all processors are either in INSERTMODE or ADDMODE */
280: MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,comm);
281: if (addv == (ADD_VALUES|INSERT_VALUES)) SETERRQ(((PetscObject)mat)->comm,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix adds/inserts on different procs");
282: mat->insertmode = addv; /* in case this processor had no cache */
284: MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
285: MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
286: PetscInfo2(mdn->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
287: return(0);
288: }
292: PetscErrorCode MatAssemblyEnd_MPIDense(Mat mat,MatAssemblyType mode)
293: {
294: Mat_MPIDense *mdn=(Mat_MPIDense*)mat->data;
295: PetscErrorCode ierr;
296: PetscInt i,*row,*col,flg,j,rstart,ncols;
297: PetscMPIInt n;
298: PetscScalar *val;
299: InsertMode addv=mat->insertmode;
302: /* wait on receives */
303: while (1) {
304: MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
305: if (!flg) break;
306:
307: for (i=0; i<n;) {
308: /* Now identify the consecutive vals belonging to the same row */
309: for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
310: if (j < n) ncols = j-i;
311: else ncols = n-i;
312: /* Now assemble all these values with a single function call */
313: MatSetValues_MPIDense(mat,1,row+i,ncols,col+i,val+i,addv);
314: i = j;
315: }
316: }
317: MatStashScatterEnd_Private(&mat->stash);
318:
319: MatAssemblyBegin(mdn->A,mode);
320: MatAssemblyEnd(mdn->A,mode);
322: if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
323: MatSetUpMultiply_MPIDense(mat);
324: }
325: return(0);
326: }
330: PetscErrorCode MatZeroEntries_MPIDense(Mat A)
331: {
333: Mat_MPIDense *l = (Mat_MPIDense*)A->data;
336: MatZeroEntries(l->A);
337: return(0);
338: }
340: /* the code does not do the diagonal entries correctly unless the
341:    matrix is square and the column and row ownerships are identical.
342: This is a BUG. The only way to fix it seems to be to access
343: mdn->A and mdn->B directly and not through the MatZeroRows()
344: routine.
345: */
348: PetscErrorCode MatZeroRows_MPIDense(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
349: {
350: Mat_MPIDense *l = (Mat_MPIDense*)A->data;
351: PetscErrorCode ierr;
352: PetscInt i,*owners = A->rmap->range;
353: PetscInt *nprocs,j,idx,nsends;
354: PetscInt nmax,*svalues,*starts,*owner,nrecvs;
355: PetscInt *rvalues,tag = ((PetscObject)A)->tag,count,base,slen,*source;
356: PetscInt *lens,*lrows,*values;
357: PetscMPIInt n,imdex,rank = l->rank,size = l->size;
358: MPI_Comm comm = ((PetscObject)A)->comm;
359: MPI_Request *send_waits,*recv_waits;
360: MPI_Status recv_status,*send_status;
361: PetscBool found;
362: const PetscScalar *xx;
363: PetscScalar *bb;
366: if (A->rmap->N != A->cmap->N) SETERRQ(((PetscObject)A)->comm,PETSC_ERR_SUP,"Only handles square matrices");
367: if (A->rmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only handles matrices with identical column and row ownership");
368: /* first count number of contributors to each processor */
369: PetscMalloc(2*size*sizeof(PetscInt),&nprocs);
370: PetscMemzero(nprocs,2*size*sizeof(PetscInt));
371: PetscMalloc((N+1)*sizeof(PetscInt),&owner); /* see note*/
372: for (i=0; i<N; i++) {
373: idx = rows[i];
374: found = PETSC_FALSE;
375: for (j=0; j<size; j++) {
376: if (idx >= owners[j] && idx < owners[j+1]) {
377: nprocs[2*j]++; nprocs[2*j+1] = 1; owner[i] = j; found = PETSC_TRUE; break;
378: }
379: }
380: if (!found) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index out of range");
381: }
382: nsends = 0; for (i=0; i<size; i++) { nsends += nprocs[2*i+1];}
384: /* inform other processors of number of messages and max length*/
385: PetscMaxSum(comm,nprocs,&nmax,&nrecvs);
387: /* post receives: */
388: PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(PetscInt),&rvalues);
389: PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);
390: for (i=0; i<nrecvs; i++) {
391: MPI_Irecv(rvalues+nmax*i,nmax,MPIU_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);
392: }
394: /* do sends:
395: 1) starts[i] gives the starting index in svalues for stuff going to
396: the ith processor
397: */
398: PetscMalloc((N+1)*sizeof(PetscInt),&svalues);
399: PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);
400: PetscMalloc((size+1)*sizeof(PetscInt),&starts);
401: starts[0] = 0;
402: for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
403: for (i=0; i<N; i++) {
404: svalues[starts[owner[i]]++] = rows[i];
405: }
407: starts[0] = 0;
408: for (i=1; i<size+1; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
409: count = 0;
410: for (i=0; i<size; i++) {
411: if (nprocs[2*i+1]) {
412: MPI_Isend(svalues+starts[i],nprocs[2*i],MPIU_INT,i,tag,comm,send_waits+count++);
413: }
414: }
415: PetscFree(starts);
417: base = owners[rank];
419: /* wait on receives */
420: PetscMalloc2(nrecvs,PetscInt,&lens,nrecvs,PetscInt,&source);
421: count = nrecvs;
422: slen = 0;
423: while (count) {
424: MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
425: /* unpack receives into our local space */
426: MPI_Get_count(&recv_status,MPIU_INT,&n);
427: source[imdex] = recv_status.MPI_SOURCE;
428: lens[imdex] = n;
429: slen += n;
430: count--;
431: }
432: PetscFree(recv_waits);
433:
434: /* move the data into the send scatter */
435: PetscMalloc((slen+1)*sizeof(PetscInt),&lrows);
436: count = 0;
437: for (i=0; i<nrecvs; i++) {
438: values = rvalues + i*nmax;
439: for (j=0; j<lens[i]; j++) {
440: lrows[count++] = values[j] - base;
441: }
442: }
443: PetscFree(rvalues);
444: PetscFree2(lens,source);
445: PetscFree(owner);
446: PetscFree(nprocs);
447:
448: /* fix right hand side if needed */
449: if (x && b) {
450: VecGetArrayRead(x,&xx);
451: VecGetArray(b,&bb);
452: for (i=0; i<slen; i++) {
453: bb[lrows[i]] = diag*xx[lrows[i]];
454: }
455: VecRestoreArrayRead(x,&xx);
456: VecRestoreArray(b,&bb);
457: }
459: /* actually zap the local rows */
460: MatZeroRows(l->A,slen,lrows,0.0,0,0);
461: if (diag != 0.0) {
462: Mat_SeqDense *ll = (Mat_SeqDense*)l->A->data;
463: PetscInt m = ll->lda, i;
464:
465: for (i=0; i<slen; i++) {
466: ll->v[lrows[i] + m*(A->cmap->rstart + lrows[i])] = diag;
467: }
468: }
469: PetscFree(lrows);
471: /* wait on sends */
472: if (nsends) {
473: PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
474: MPI_Waitall(nsends,send_waits,send_status);
475: PetscFree(send_status);
476: }
477: PetscFree(send_waits);
478: PetscFree(svalues);
480: return(0);
481: }
485: PetscErrorCode MatMult_MPIDense(Mat mat,Vec xx,Vec yy)
486: {
487: Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
491: VecScatterBegin(mdn->Mvctx,xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);
492: VecScatterEnd(mdn->Mvctx,xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);
493: MatMult_SeqDense(mdn->A,mdn->lvec,yy);
494: return(0);
495: }
499: PetscErrorCode MatMultAdd_MPIDense(Mat mat,Vec xx,Vec yy,Vec zz)
500: {
501: Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
505: VecScatterBegin(mdn->Mvctx,xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);
506: VecScatterEnd(mdn->Mvctx,xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);
507: MatMultAdd_SeqDense(mdn->A,mdn->lvec,yy,zz);
508: return(0);
509: }
513: PetscErrorCode MatMultTranspose_MPIDense(Mat A,Vec xx,Vec yy)
514: {
515: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
517: PetscScalar zero = 0.0;
520: VecSet(yy,zero);
521: MatMultTranspose_SeqDense(a->A,xx,a->lvec);
522: VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
523: VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
524: return(0);
525: }
529: PetscErrorCode MatMultTransposeAdd_MPIDense(Mat A,Vec xx,Vec yy,Vec zz)
530: {
531: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
535: VecCopy(yy,zz);
536: MatMultTranspose_SeqDense(a->A,xx,a->lvec);
537: VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
538: VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
539: return(0);
540: }
544: PetscErrorCode MatGetDiagonal_MPIDense(Mat A,Vec v)
545: {
546: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
547: Mat_SeqDense *aloc = (Mat_SeqDense*)a->A->data;
549: PetscInt len,i,n,m = A->rmap->n,radd;
550: PetscScalar *x,zero = 0.0;
551:
553: VecSet(v,zero);
554: VecGetArray(v,&x);
555: VecGetSize(v,&n);
556: if (n != A->rmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Nonconforming mat and vec");
557: len = PetscMin(a->A->rmap->n,a->A->cmap->n);
558: radd = A->rmap->rstart*m;
559: for (i=0; i<len; i++) {
560: x[i] = aloc->v[radd + i*m + i];
561: }
562: VecRestoreArray(v,&x);
563: return(0);
564: }
568: PetscErrorCode MatDestroy_MPIDense(Mat mat)
569: {
570: Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
572: #if defined(PETSC_HAVE_PLAPACK)
573: Mat_Plapack *lu=(Mat_Plapack*)mat->spptr;
574: #endif
578: #if defined(PETSC_USE_LOG)
579: PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N);
580: #endif
581: MatStashDestroy_Private(&mat->stash);
582: MatDestroy(&mdn->A);
583: VecDestroy(&mdn->lvec);
584: VecScatterDestroy(&mdn->Mvctx);
585: #if defined(PETSC_HAVE_PLAPACK)
586: if (lu) {
587: PLA_Obj_free(&lu->A);
588: PLA_Obj_free (&lu->pivots);
589: PLA_Temp_free(&lu->templ);
590: ISDestroy(&lu->is_pla);
591: ISDestroy(&lu->is_petsc);
592: VecScatterDestroy(&lu->ctx);
593: }
594: PetscFree(mat->spptr);
595: #endif
597: PetscFree(mat->data);
598: PetscObjectChangeTypeName((PetscObject)mat,0);
599: PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatGetDiagonalBlock_C","",PETSC_NULL);
600: PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMPIDenseSetPreallocation_C","",PETSC_NULL);
601: PetscObjectComposeFunction((PetscObject)mat,"MatMatMult_mpiaij_mpidense_C","",PETSC_NULL);
602: PetscObjectComposeFunction((PetscObject)mat,"MatMatMultSymbolic_mpiaij_mpidense_C","",PETSC_NULL);
603: PetscObjectComposeFunction((PetscObject)mat,"MatMatMultNumeric_mpiaij_mpidense_C","",PETSC_NULL);
604: return(0);
605: }
609: static PetscErrorCode MatView_MPIDense_Binary(Mat mat,PetscViewer viewer)
610: {
611: Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
612: PetscErrorCode ierr;
613: PetscViewerFormat format;
614: int fd;
615: PetscInt header[4],mmax,N = mat->cmap->N,i,j,m,k;
616: PetscMPIInt rank,tag = ((PetscObject)viewer)->tag,size;
617: PetscScalar *work,*v,*vv;
618: Mat_SeqDense *a = (Mat_SeqDense*)mdn->A->data;
621: if (mdn->size == 1) {
622: MatView(mdn->A,viewer);
623: } else {
624: PetscViewerBinaryGetDescriptor(viewer,&fd);
625: MPI_Comm_rank(((PetscObject)mat)->comm,&rank);
626: MPI_Comm_size(((PetscObject)mat)->comm,&size);
628: PetscViewerGetFormat(viewer,&format);
629: if (format == PETSC_VIEWER_NATIVE) {
631: if (!rank) {
632: /* store the matrix as a dense matrix */
633: header[0] = MAT_FILE_CLASSID;
634: header[1] = mat->rmap->N;
635: header[2] = N;
636: header[3] = MATRIX_BINARY_FORMAT_DENSE;
637: PetscBinaryWrite(fd,header,4,PETSC_INT,PETSC_TRUE);
639: /* get largest work array needed for transposing array */
640: mmax = mat->rmap->n;
641: for (i=1; i<size; i++) {
642: mmax = PetscMax(mmax,mat->rmap->range[i+1] - mat->rmap->range[i]);
643: }
644: PetscMalloc(mmax*N*sizeof(PetscScalar),&work);
646: /* write out local array, by rows */
647: m = mat->rmap->n;
648: v = a->v;
649: for (j=0; j<N; j++) {
650: for (i=0; i<m; i++) {
651: work[j + i*N] = *v++;
652: }
653: }
654: PetscBinaryWrite(fd,work,m*N,PETSC_SCALAR,PETSC_FALSE);
655: /* get largest work array to receive messages from other processes, excludes process zero */
656: mmax = 0;
657: for (i=1; i<size; i++) {
658: mmax = PetscMax(mmax,mat->rmap->range[i+1] - mat->rmap->range[i]);
659: }
660: PetscMalloc(mmax*N*sizeof(PetscScalar),&vv);
661: for(k = 1; k < size; k++) {
662: v = vv;
663: m = mat->rmap->range[k+1] - mat->rmap->range[k];
664: MPILong_Recv(v,m*N,MPIU_SCALAR,k,tag,((PetscObject)mat)->comm);
666: for(j = 0; j < N; j++) {
667: for(i = 0; i < m; i++) {
668: work[j + i*N] = *v++;
669: }
670: }
671: PetscBinaryWrite(fd,work,m*N,PETSC_SCALAR,PETSC_FALSE);
672: }
673: PetscFree(work);
674: PetscFree(vv);
675: } else {
676: MPILong_Send(a->v,mat->rmap->n*mat->cmap->N,MPIU_SCALAR,0,tag,((PetscObject)mat)->comm);
677: }
678: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"To store a parallel dense matrix you must first call PetscViewerSetFormat(viewer,PETSC_VIEWER_NATIVE)");
679: }
680: return(0);
681: }
685: static PetscErrorCode MatView_MPIDense_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
686: {
687: Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
688: PetscErrorCode ierr;
689: PetscMPIInt size = mdn->size,rank = mdn->rank;
690: const PetscViewerType vtype;
691: PetscBool iascii,isdraw;
692: PetscViewer sviewer;
693: PetscViewerFormat format;
694: #if defined(PETSC_HAVE_PLAPACK)
695: Mat_Plapack *lu;
696: #endif
699: PetscTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
700: PetscTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
701: if (iascii) {
702: PetscViewerGetType(viewer,&vtype);
703: PetscViewerGetFormat(viewer,&format);
704: if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
705: MatInfo info;
706: MatGetInfo(mat,MAT_LOCAL,&info);
707: PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);
708: PetscViewerASCIISynchronizedPrintf(viewer," [%d] local rows %D nz %D nz alloced %D mem %D \n",rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);
709: PetscViewerFlush(viewer);
710: PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);
711: #if defined(PETSC_HAVE_PLAPACK)
712: PetscViewerASCIIPrintf(viewer,"PLAPACK run parameters:\n");
713: PetscViewerASCIIPrintf(viewer," Processor mesh: nprows %d, npcols %d\n",Plapack_nprows, Plapack_npcols);
714: PetscViewerASCIIPrintf(viewer," Error checking: %d\n",Plapack_ierror);
715: PetscViewerASCIIPrintf(viewer," Algorithmic block size: %d\n",Plapack_nb_alg);
716: if (mat->factortype){
717: lu=(Mat_Plapack*)(mat->spptr);
718: PetscViewerASCIIPrintf(viewer," Distr. block size nb: %d \n",lu->nb);
719: }
720: #else
721: VecScatterView(mdn->Mvctx,viewer);
722: #endif
723: return(0);
724: } else if (format == PETSC_VIEWER_ASCII_INFO) {
725: return(0);
726: }
727: } else if (isdraw) {
728: PetscDraw draw;
729: PetscBool isnull;
731: PetscViewerDrawGetDraw(viewer,0,&draw);
732: PetscDrawIsNull(draw,&isnull);
733: if (isnull) return(0);
734: }
736: if (size == 1) {
737: MatView(mdn->A,viewer);
738: } else {
739: /* assemble the entire matrix onto first processor. */
740: Mat A;
741: PetscInt M = mat->rmap->N,N = mat->cmap->N,m,row,i,nz;
742: PetscInt *cols;
743: PetscScalar *vals;
745: MatCreate(((PetscObject)mat)->comm,&A);
746: if (!rank) {
747: MatSetSizes(A,M,N,M,N);
748: } else {
749: MatSetSizes(A,0,0,M,N);
750: }
751: /* Since this is a temporary matrix, MATMPIDENSE instead of ((PetscObject)A)->type_name here is probably acceptable. */
752: MatSetType(A,MATMPIDENSE);
753: MatMPIDenseSetPreallocation(A,PETSC_NULL);
754: PetscLogObjectParent(mat,A);
756: /* Copy the matrix ... This isn't the most efficient means,
757: but it's quick for now */
758: A->insertmode = INSERT_VALUES;
759: row = mat->rmap->rstart; m = mdn->A->rmap->n;
760: for (i=0; i<m; i++) {
761: MatGetRow_MPIDense(mat,row,&nz,&cols,&vals);
762: MatSetValues_MPIDense(A,1,&row,nz,cols,vals,INSERT_VALUES);
763: MatRestoreRow_MPIDense(mat,row,&nz,&cols,&vals);
764: row++;
765: }
767: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
768: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
769: PetscViewerGetSingleton(viewer,&sviewer);
770: if (!rank) {
771: PetscObjectSetName((PetscObject)((Mat_MPIDense*)(A->data))->A,((PetscObject)mat)->name);
772: /* Set the type name to MATMPIDense so that the correct type can be printed out by PetscObjectPrintClassNamePrefixType() in MatView_SeqDense_ASCII()*/
773: PetscStrcpy(((PetscObject)((Mat_MPIDense*)(A->data))->A)->type_name,MATMPIDENSE);
774: MatView(((Mat_MPIDense*)(A->data))->A,sviewer);
775: }
776: PetscViewerRestoreSingleton(viewer,&sviewer);
777: PetscViewerFlush(viewer);
778: MatDestroy(&A);
779: }
780: return(0);
781: }
785: PetscErrorCode MatView_MPIDense(Mat mat,PetscViewer viewer)
786: {
788: PetscBool iascii,isbinary,isdraw,issocket;
789:
791:
792: PetscTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
793: PetscTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
794: PetscTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);
795: PetscTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
797: if (iascii || issocket || isdraw) {
798: MatView_MPIDense_ASCIIorDraworSocket(mat,viewer);
799: } else if (isbinary) {
800: MatView_MPIDense_Binary(mat,viewer);
801: } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Viewer type %s not supported by MPI dense matrix",((PetscObject)viewer)->type_name);
802: return(0);
803: }
807: PetscErrorCode MatGetInfo_MPIDense(Mat A,MatInfoType flag,MatInfo *info)
808: {
809: Mat_MPIDense *mat = (Mat_MPIDense*)A->data;
810: Mat mdn = mat->A;
812: PetscReal isend[5],irecv[5];
815: info->block_size = 1.0;
816: MatGetInfo(mdn,MAT_LOCAL,info);
817: isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
818: isend[3] = info->memory; isend[4] = info->mallocs;
819: if (flag == MAT_LOCAL) {
820: info->nz_used = isend[0];
821: info->nz_allocated = isend[1];
822: info->nz_unneeded = isend[2];
823: info->memory = isend[3];
824: info->mallocs = isend[4];
825: } else if (flag == MAT_GLOBAL_MAX) {
826: MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_MAX,((PetscObject)A)->comm);
827: info->nz_used = irecv[0];
828: info->nz_allocated = irecv[1];
829: info->nz_unneeded = irecv[2];
830: info->memory = irecv[3];
831: info->mallocs = irecv[4];
832: } else if (flag == MAT_GLOBAL_SUM) {
833: MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_SUM,((PetscObject)A)->comm);
834: info->nz_used = irecv[0];
835: info->nz_allocated = irecv[1];
836: info->nz_unneeded = irecv[2];
837: info->memory = irecv[3];
838: info->mallocs = irecv[4];
839: }
840: info->fill_ratio_given = 0; /* no parallel LU/ILU/Cholesky */
841: info->fill_ratio_needed = 0;
842: info->factor_mallocs = 0;
843: return(0);
844: }
848: PetscErrorCode MatSetOption_MPIDense(Mat A,MatOption op,PetscBool flg)
849: {
850: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
854: switch (op) {
855: case MAT_NEW_NONZERO_LOCATIONS:
856: case MAT_NEW_NONZERO_LOCATION_ERR:
857: case MAT_NEW_NONZERO_ALLOCATION_ERR:
858: MatSetOption(a->A,op,flg);
859: break;
860: case MAT_ROW_ORIENTED:
861: a->roworiented = flg;
862: MatSetOption(a->A,op,flg);
863: break;
864: case MAT_NEW_DIAGONALS:
865: case MAT_KEEP_NONZERO_PATTERN:
866: case MAT_USE_HASH_TABLE:
867: PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
868: break;
869: case MAT_IGNORE_OFF_PROC_ENTRIES:
870: a->donotstash = flg;
871: break;
872: case MAT_SYMMETRIC:
873: case MAT_STRUCTURALLY_SYMMETRIC:
874: case MAT_HERMITIAN:
875: case MAT_SYMMETRY_ETERNAL:
876: case MAT_IGNORE_LOWER_TRIANGULAR:
877: PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
878: break;
879: default:
880: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %s",MatOptions[op]);
881: }
882: return(0);
883: }
888: PetscErrorCode MatDiagonalScale_MPIDense(Mat A,Vec ll,Vec rr)
889: {
890: Mat_MPIDense *mdn = (Mat_MPIDense*)A->data;
891: Mat_SeqDense *mat = (Mat_SeqDense*)mdn->A->data;
892: PetscScalar *l,*r,x,*v;
894: PetscInt i,j,s2a,s3a,s2,s3,m=mdn->A->rmap->n,n=mdn->A->cmap->n;
897: MatGetLocalSize(A,&s2,&s3);
898: if (ll) {
899: VecGetLocalSize(ll,&s2a);
900: if (s2a != s2) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Left scaling vector non-conforming local size, %d != %d.", s2a, s2);
901: VecGetArray(ll,&l);
902: for (i=0; i<m; i++) {
903: x = l[i];
904: v = mat->v + i;
905: for (j=0; j<n; j++) { (*v) *= x; v+= m;}
906: }
907: VecRestoreArray(ll,&l);
908: PetscLogFlops(n*m);
909: }
910: if (rr) {
911: VecGetLocalSize(rr,&s3a);
912: if (s3a != s3) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Right scaling vec non-conforming local size, %d != %d.", s3a, s3);
913: VecScatterBegin(mdn->Mvctx,rr,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);
914: VecScatterEnd(mdn->Mvctx,rr,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);
915: VecGetArray(mdn->lvec,&r);
916: for (i=0; i<n; i++) {
917: x = r[i];
918: v = mat->v + i*m;
919: for (j=0; j<m; j++) { (*v++) *= x;}
920: }
921: VecRestoreArray(mdn->lvec,&r);
922: PetscLogFlops(n*m);
923: }
924: return(0);
925: }
929: PetscErrorCode MatNorm_MPIDense(Mat A,NormType type,PetscReal *nrm)
930: {
931: Mat_MPIDense *mdn = (Mat_MPIDense*)A->data;
932: Mat_SeqDense *mat = (Mat_SeqDense*)mdn->A->data;
934: PetscInt i,j;
935: PetscReal sum = 0.0;
936: PetscScalar *v = mat->v;
939: if (mdn->size == 1) {
940: MatNorm(mdn->A,type,nrm);
941: } else {
942: if (type == NORM_FROBENIUS) {
943: for (i=0; i<mdn->A->cmap->n*mdn->A->rmap->n; i++) {
944: #if defined(PETSC_USE_COMPLEX)
945: sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
946: #else
947: sum += (*v)*(*v); v++;
948: #endif
949: }
950: MPI_Allreduce(&sum,nrm,1,MPIU_REAL,MPIU_SUM,((PetscObject)A)->comm);
951: *nrm = PetscSqrtReal(*nrm);
952: PetscLogFlops(2.0*mdn->A->cmap->n*mdn->A->rmap->n);
953: } else if (type == NORM_1) {
954: PetscReal *tmp,*tmp2;
955: PetscMalloc2(A->cmap->N,PetscReal,&tmp,A->cmap->N,PetscReal,&tmp2);
956: PetscMemzero(tmp,A->cmap->N*sizeof(PetscReal));
957: PetscMemzero(tmp2,A->cmap->N*sizeof(PetscReal));
958: *nrm = 0.0;
959: v = mat->v;
960: for (j=0; j<mdn->A->cmap->n; j++) {
961: for (i=0; i<mdn->A->rmap->n; i++) {
962: tmp[j] += PetscAbsScalar(*v); v++;
963: }
964: }
965: MPI_Allreduce(tmp,tmp2,A->cmap->N,MPIU_REAL,MPIU_SUM,((PetscObject)A)->comm);
966: for (j=0; j<A->cmap->N; j++) {
967: if (tmp2[j] > *nrm) *nrm = tmp2[j];
968: }
969:       PetscFree2(tmp,tmp2);
970: PetscLogFlops(A->cmap->n*A->rmap->n);
971: } else if (type == NORM_INFINITY) { /* max row norm */
972: PetscReal ntemp;
973: MatNorm(mdn->A,type,&ntemp);
974: MPI_Allreduce(&ntemp,nrm,1,MPIU_REAL,MPIU_MAX,((PetscObject)A)->comm);
975: } else SETERRQ(((PetscObject)A)->comm,PETSC_ERR_SUP,"No support for two norm");
976: }
977: return(0);
978: }
982: PetscErrorCode MatTranspose_MPIDense(Mat A,MatReuse reuse,Mat *matout)
983: {
984: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
985: Mat_SeqDense *Aloc = (Mat_SeqDense*)a->A->data;
986: Mat B;
987: PetscInt M = A->rmap->N,N = A->cmap->N,m,n,*rwork,rstart = A->rmap->rstart;
989: PetscInt j,i;
990: PetscScalar *v;
993: if (reuse == MAT_REUSE_MATRIX && A == *matout && M != N) SETERRQ(((PetscObject)A)->comm,PETSC_ERR_SUP,"Supports square matrix only in-place");
994: if (reuse == MAT_INITIAL_MATRIX || A == *matout) {
995: MatCreate(((PetscObject)A)->comm,&B);
996: MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);
997: MatSetType(B,((PetscObject)A)->type_name);
998: MatMPIDenseSetPreallocation(B,PETSC_NULL);
999: } else {
1000: B = *matout;
1001: }
1003: m = a->A->rmap->n; n = a->A->cmap->n; v = Aloc->v;
1004: PetscMalloc(m*sizeof(PetscInt),&rwork);
1005: for (i=0; i<m; i++) rwork[i] = rstart + i;
1006: for (j=0; j<n; j++) {
1007: MatSetValues(B,1,&j,m,rwork,v,INSERT_VALUES);
1008: v += m;
1009: }
1010: PetscFree(rwork);
1011: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
1012: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
1013: if (reuse == MAT_INITIAL_MATRIX || *matout != A) {
1014: *matout = B;
1015: } else {
1016: MatHeaderMerge(A,B);
1017: }
1018: return(0);
1019: }
1022: static PetscErrorCode MatDuplicate_MPIDense(Mat,MatDuplicateOption,Mat *);
1027: PetscErrorCode MatSetUpPreallocation_MPIDense(Mat A)
1028: {
1032: MatMPIDenseSetPreallocation(A,0);
1033: return(0);
1034: }
1036: #if defined(PETSC_HAVE_PLAPACK)
1040: PetscErrorCode MatMPIDenseCopyToPlapack(Mat A,Mat F)
1041: {
1042: Mat_Plapack *lu = (Mat_Plapack*)(F)->spptr;
1044: PetscInt M=A->cmap->N,m=A->rmap->n,rstart;
1045: PetscScalar *array;
1046: PetscReal one = 1.0;
1049: /* Copy A into F->lu->A */
1050: PLA_Obj_set_to_zero(lu->A);
1051: PLA_API_begin();
1052: PLA_Obj_API_open(lu->A);
1053: MatGetOwnershipRange(A,&rstart,PETSC_NULL);
1054: MatGetArray(A,&array);
1055: PLA_API_axpy_matrix_to_global(m,M, &one,(void *)array,m,lu->A,rstart,0);
1056: MatRestoreArray(A,&array);
1057: PLA_Obj_API_close(lu->A);
1058: PLA_API_end();
1059: lu->rstart = rstart;
1060: return(0);
1061: }
1065: PetscErrorCode MatMPIDenseCopyFromPlapack(Mat F,Mat A)
1066: {
1067: Mat_Plapack *lu = (Mat_Plapack*)(F)->spptr;
1069: PetscInt M=A->cmap->N,m=A->rmap->n,rstart;
1070: PetscScalar *array;
1071: PetscReal one = 1.0;
1074:   /* Copy lu->A (the PLAPACK object attached to F) back into the PETSc matrix A */
1075: MatZeroEntries(A);
1076: PLA_API_begin();
1077: PLA_Obj_API_open(lu->A);
1078: MatGetOwnershipRange(A,&rstart,PETSC_NULL);
1079: MatGetArray(A,&array);
1080: PLA_API_axpy_global_to_matrix(m,M, &one,lu->A,rstart,0,(void *)array,m);
1081: MatRestoreArray(A,&array);
1082: PLA_Obj_API_close(lu->A);
1083: PLA_API_end();
1084: lu->rstart = rstart;
1085: return(0);
1086: }
1090: PetscErrorCode MatMatMultNumeric_MPIDense_MPIDense(Mat A,Mat B,Mat C)
1091: {
1093: Mat_Plapack *luA = (Mat_Plapack*)A->spptr;
1094: Mat_Plapack *luB = (Mat_Plapack*)B->spptr;
1095: Mat_Plapack *luC = (Mat_Plapack*)C->spptr;
1096: PLA_Obj alpha = NULL,beta = NULL;
1099: MatMPIDenseCopyToPlapack(A,A);
1100: MatMPIDenseCopyToPlapack(B,B);
1102: /*
1103: PLA_Global_show("A = ",luA->A,"%g ","");
1104: PLA_Global_show("B = ",luB->A,"%g ","");
1105: */
1107: /* do the multiply in PLA */
1108: PLA_Create_constants_conf_to(luA->A,NULL,NULL,&alpha);
1109: PLA_Create_constants_conf_to(luC->A,NULL,&beta,NULL);
1110: CHKMEMQ;
1112: PLA_Gemm(PLA_NO_TRANSPOSE,PLA_NO_TRANSPOSE,alpha,luA->A,luB->A,beta,luC->A); /* */
1113: CHKMEMQ;
1114: PLA_Obj_free(&alpha);
1115: PLA_Obj_free(&beta);
1117: /*
1118: PLA_Global_show("C = ",luC->A,"%g ","");
1119: */
1120: MatMPIDenseCopyFromPlapack(C,C);
1121: return(0);
1122: }
1126: PetscErrorCode MatMatMultSymbolic_MPIDense_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
1127: {
1129: PetscInt m=A->rmap->n,n=B->cmap->n;
1130: Mat Cmat;
1133: if (A->cmap->n != B->rmap->n) SETERRQ2(((PetscObject)A)->comm,PETSC_ERR_ARG_SIZ,"A->cmap->n %d != B->rmap->n %d\n",A->cmap->n,B->rmap->n);
1134:   SETERRQ(((PetscObject)A)->comm,PETSC_ERR_LIB,"Due to apparent bugs in PLAPACK, this is not currently supported");
1135: MatCreate(((PetscObject)B)->comm,&Cmat);
1136: MatSetSizes(Cmat,m,n,A->rmap->N,B->cmap->N);
1137: MatSetType(Cmat,MATMPIDENSE);
1138: MatAssemblyBegin(Cmat,MAT_FINAL_ASSEMBLY);
1139: MatAssemblyEnd(Cmat,MAT_FINAL_ASSEMBLY);
1141: *C = Cmat;
1142: return(0);
1143: }
1147: PetscErrorCode MatMatMult_MPIDense_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
1148: {
1152: if (scall == MAT_INITIAL_MATRIX){
1153: MatMatMultSymbolic_MPIDense_MPIDense(A,B,fill,C);
1154: }
1155: MatMatMultNumeric_MPIDense_MPIDense(A,B,*C);
1156: return(0);
1157: }
1161: PetscErrorCode MatSolve_MPIDense(Mat A,Vec b,Vec x)
1162: {
1163: MPI_Comm comm = ((PetscObject)A)->comm;
1164: Mat_Plapack *lu = (Mat_Plapack*)A->spptr;
1166: PetscInt M=A->rmap->N,m=A->rmap->n,rstart,i,j,*idx_pla,*idx_petsc,loc_m,loc_stride;
1167: PetscScalar *array;
1168: PetscReal one = 1.0;
1169:   PetscMPIInt size,rank,r_rank,r_nproc,c_rank,c_nproc;
1170: PLA_Obj v_pla = NULL;
1171: PetscScalar *loc_buf;
1172: Vec loc_x;
1173:
1175: MPI_Comm_size(comm,&size);
1176: MPI_Comm_rank(comm,&rank);
1178: /* Create PLAPACK vector objects, then copy b into PLAPACK b */
1179: PLA_Mvector_create(lu->datatype,M,1,lu->templ,PLA_ALIGN_FIRST,&v_pla);
1180: PLA_Obj_set_to_zero(v_pla);
1182: /* Copy b into rhs_pla */
1183: PLA_API_begin();
1184: PLA_Obj_API_open(v_pla);
1185: VecGetArray(b,&array);
1186: PLA_API_axpy_vector_to_global(m,&one,(void *)array,1,v_pla,lu->rstart);
1187: VecRestoreArray(b,&array);
1188: PLA_Obj_API_close(v_pla);
1189: PLA_API_end();
1191: if (A->factortype == MAT_FACTOR_LU){
1192: /* Apply the permutations to the right hand sides */
1193: PLA_Apply_pivots_to_rows (v_pla,lu->pivots);
1195: /* Solve L y = b, overwriting b with y */
1196: PLA_Trsv( PLA_LOWER_TRIANGULAR,PLA_NO_TRANSPOSE,PLA_UNIT_DIAG,lu->A,v_pla );
1198: /* Solve U x = y (=b), overwriting b with x */
1199: PLA_Trsv( PLA_UPPER_TRIANGULAR,PLA_NO_TRANSPOSE,PLA_NONUNIT_DIAG,lu->A,v_pla );
1200: } else { /* MAT_FACTOR_CHOLESKY */
1201: PLA_Trsv( PLA_LOWER_TRIANGULAR,PLA_NO_TRANSPOSE,PLA_NONUNIT_DIAG,lu->A,v_pla);
1202: PLA_Trsv( PLA_LOWER_TRIANGULAR,(lu->datatype == MPI_DOUBLE ? PLA_TRANSPOSE : PLA_CONJUGATE_TRANSPOSE),
1203: PLA_NONUNIT_DIAG,lu->A,v_pla);
1204: }
1206: /* Copy PLAPACK x into Petsc vector x */
1207: PLA_Obj_local_length(v_pla, &loc_m);
1208: PLA_Obj_local_buffer(v_pla, (void**)&loc_buf);
1209: PLA_Obj_local_stride(v_pla, &loc_stride);
1210: /*
1211: PetscPrintf(PETSC_COMM_SELF," [%d] b - local_m %d local_stride %d, loc_buf: %g %g, nb: %d\n",rank,loc_m,loc_stride,loc_buf[0],loc_buf[(loc_m-1)*loc_stride],lu->nb);
1212: */
1213: VecCreateSeqWithArray(PETSC_COMM_SELF,loc_m*loc_stride,loc_buf,&loc_x);
1214: if (!lu->pla_solved){
1215:
1216: PLA_Temp_comm_row_info(lu->templ,&Plapack_comm_2d,&r_rank,&r_nproc);
1217: PLA_Temp_comm_col_info(lu->templ,&Plapack_comm_2d,&c_rank,&c_nproc);
1219:     /* Create index sets and ctx for the VecScatter */
1220: PLA_Obj_local_length(v_pla, &loc_m);
1221: PLA_Obj_local_stride(v_pla, &loc_stride);
1222: PetscMalloc2(loc_m,PetscInt,&idx_pla,loc_m,PetscInt,&idx_petsc);
1224: rstart = (r_rank*c_nproc+c_rank)*lu->nb;
1225: for (i=0; i<loc_m; i+=lu->nb){
1226: j = 0;
1227: while (j < lu->nb && i+j < loc_m){
1228: idx_petsc[i+j] = rstart + j; j++;
1229: }
1230: rstart += size*lu->nb;
1231: }
1233: for (i=0; i<loc_m; i++) idx_pla[i] = i*loc_stride;
1235: ISCreateGeneral(PETSC_COMM_SELF,loc_m,idx_pla,PETSC_COPY_VALUES,&lu->is_pla);
1236: ISCreateGeneral(PETSC_COMM_SELF,loc_m,idx_petsc,PETSC_COPY_VALUES,&lu->is_petsc);
1237: PetscFree2(idx_pla,idx_petsc);
1238: VecScatterCreate(loc_x,lu->is_pla,x,lu->is_petsc,&lu->ctx);
1239: }
1240: VecScatterBegin(lu->ctx,loc_x,x,INSERT_VALUES,SCATTER_FORWARD);
1241: VecScatterEnd(lu->ctx,loc_x,x,INSERT_VALUES,SCATTER_FORWARD);
1242:
1243: /* Free data */
1244: VecDestroy(&loc_x);
1245: PLA_Obj_free(&v_pla);
1247: lu->pla_solved = PETSC_TRUE;
1248: return(0);
1249: }
1253: PetscErrorCode MatLUFactorNumeric_MPIDense(Mat F,Mat A,const MatFactorInfo *info)
1254: {
1255: Mat_Plapack *lu = (Mat_Plapack*)(F)->spptr;
1257: PetscInt M=A->rmap->N,m=A->rmap->n,rstart,rend;
1258: PetscInt info_pla=0;
1259: PetscScalar *array,one = 1.0;
1262: if (lu->mstruct == SAME_NONZERO_PATTERN){
1263: PLA_Obj_free(&lu->A);
1264: PLA_Obj_free (&lu->pivots);
1265: }
1266: /* Create PLAPACK matrix object */
1267: lu->A = NULL; lu->pivots = NULL;
1268: PLA_Matrix_create(lu->datatype,M,M,lu->templ,PLA_ALIGN_FIRST,PLA_ALIGN_FIRST,&lu->A);
1269: PLA_Obj_set_to_zero(lu->A);
1270: PLA_Mvector_create(MPI_INT,M,1,lu->templ,PLA_ALIGN_FIRST,&lu->pivots);
1272: /* Copy A into lu->A */
1273: PLA_API_begin();
1274: PLA_Obj_API_open(lu->A);
1275: MatGetOwnershipRange(A,&rstart,&rend);
1276: MatGetArray(A,&array);
1277: PLA_API_axpy_matrix_to_global(m,M, &one,(void *)array,m,lu->A,rstart,0);
1278: MatRestoreArray(A,&array);
1279: PLA_Obj_API_close(lu->A);
1280: PLA_API_end();
1282: /* Factor P A -> L U overwriting lower triangular portion of A with L, upper, U */
1283: info_pla = PLA_LU(lu->A,lu->pivots);
1284: if (info_pla != 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Zero pivot encountered at row %d from PLA_LU()",info_pla);
1286: lu->rstart = rstart;
1287: lu->mstruct = SAME_NONZERO_PATTERN;
1288: F->ops->solve = MatSolve_MPIDense;
1289: F->assembled = PETSC_TRUE; /* required by -ksp_view */
1290: return(0);
1291: }
1295: PetscErrorCode MatCholeskyFactorNumeric_MPIDense(Mat F,Mat A,const MatFactorInfo *info)
1296: {
1297: Mat_Plapack *lu = (Mat_Plapack*)F->spptr;
1299: PetscInt M=A->rmap->N,m=A->rmap->n,rstart,rend;
1300: PetscInt info_pla=0;
1301: PetscScalar *array,one = 1.0;
1304: if (lu->mstruct == SAME_NONZERO_PATTERN){
1305: PLA_Obj_free(&lu->A);
1306: }
1307: /* Create PLAPACK matrix object */
1308: lu->A = NULL;
1309: lu->pivots = NULL;
1310: PLA_Matrix_create(lu->datatype,M,M,lu->templ,PLA_ALIGN_FIRST,PLA_ALIGN_FIRST,&lu->A);
1312: /* Copy A into lu->A */
1313: PLA_API_begin();
1314: PLA_Obj_API_open(lu->A);
1315: MatGetOwnershipRange(A,&rstart,&rend);
1316: MatGetArray(A,&array);
1317: PLA_API_axpy_matrix_to_global(m,M, &one,(void *)array,m,lu->A,rstart,0);
1318: MatRestoreArray(A,&array);
1319: PLA_Obj_API_close(lu->A);
1320: PLA_API_end();
1322:   /* Cholesky factor A -> L L^T, overwriting the lower triangle of A with L */
1323: info_pla = PLA_Chol(PLA_LOWER_TRIANGULAR,lu->A);
1324: if (info_pla != 0) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_MAT_CH_ZRPVT,"Nonpositive definite matrix detected at row %d from PLA_Chol()",info_pla);
1326: lu->rstart = rstart;
1327: lu->mstruct = SAME_NONZERO_PATTERN;
1328: F->ops->solve = MatSolve_MPIDense;
1329: F->assembled = PETSC_TRUE; /* required by -ksp_view */
1330: return(0);
1331: }
1333: /* Note the Petsc perm permutation is ignored */
1336: PetscErrorCode MatCholeskyFactorSymbolic_MPIDense(Mat F,Mat A,IS perm,const MatFactorInfo *info)
1337: {
1339: PetscBool issymmetric,set;
1342: MatIsSymmetricKnown(A,&set,&issymmetric);
1343: if (!set || !issymmetric) SETERRQ(((PetscObject)A)->comm,PETSC_ERR_USER,"Matrix must be set as MAT_SYMMETRIC for CholeskyFactor()");
1344: F->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_MPIDense;
1345: return(0);
1346: }
1348: /* Note the Petsc r and c permutations are ignored */
1351: PetscErrorCode MatLUFactorSymbolic_MPIDense(Mat F,Mat A,IS r,IS c,const MatFactorInfo *info)
1352: {
1354: PetscInt M = A->rmap->N;
1355: Mat_Plapack *lu;
1358: lu = (Mat_Plapack*)F->spptr;
1359: PLA_Mvector_create(MPI_INT,M,1,lu->templ,PLA_ALIGN_FIRST,&lu->pivots);
1360: F->ops->lufactornumeric = MatLUFactorNumeric_MPIDense;
1361: return(0);
1362: }
1367: PetscErrorCode MatFactorGetSolverPackage_mpidense_plapack(Mat A,const MatSolverPackage *type)
1368: {
1370: *type = MATSOLVERPLAPACK;
1371: return(0);
1372: }
1378: PetscErrorCode MatGetFactor_mpidense_plapack(Mat A,MatFactorType ftype,Mat *F)
1379: {
1381: Mat_Plapack *lu;
1382: PetscMPIInt size;
1383: PetscInt M=A->rmap->N;
1386: /* Create the factorization matrix */
1387: MatCreate(((PetscObject)A)->comm,F);
1388: MatSetSizes(*F,A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N);
1389: MatSetType(*F,((PetscObject)A)->type_name);
1390: PetscNewLog(*F,Mat_Plapack,&lu);
1391: (*F)->spptr = (void*)lu;
1393: /* Set default Plapack parameters */
1394: MPI_Comm_size(((PetscObject)A)->comm,&size);
1395: lu->nb = M/size;
1396: if (M - lu->nb*size) lu->nb++; /* without cyclic distribution */
1397:
1398: /* Set runtime options */
1399: PetscOptionsBegin(((PetscObject)A)->comm,((PetscObject)A)->prefix,"PLAPACK Options","Mat");
1400: PetscOptionsInt("-mat_plapack_nb","block size of template vector","None",lu->nb,&lu->nb,PETSC_NULL);
1401: PetscOptionsEnd();
1403: /* Create object distribution template */
1404: lu->templ = NULL;
1405: PLA_Temp_create(lu->nb, 0, &lu->templ);
1407: /* Set the datatype */
1408: #if defined(PETSC_USE_COMPLEX)
1409: lu->datatype = MPI_DOUBLE_COMPLEX;
1410: #else
1411: lu->datatype = MPI_DOUBLE;
1412: #endif
1414: PLA_Matrix_create(lu->datatype,M,A->cmap->N,lu->templ,PLA_ALIGN_FIRST,PLA_ALIGN_FIRST,&lu->A);
1417:   lu->pla_solved = PETSC_FALSE; /* MatSolve_Plapack() has not been called yet */
1418: lu->mstruct = DIFFERENT_NONZERO_PATTERN;
1420: if (ftype == MAT_FACTOR_LU) {
1421: (*F)->ops->lufactorsymbolic = MatLUFactorSymbolic_MPIDense;
1422: } else if (ftype == MAT_FACTOR_CHOLESKY) {
1423: (*F)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_MPIDense;
1424: } else SETERRQ(((PetscObject)A)->comm,PETSC_ERR_SUP,"No incomplete factorizations for dense matrices");
1425: (*F)->factortype = ftype;
1426: PetscObjectComposeFunctionDynamic((PetscObject)(*F),"MatFactorGetSolverPackage_C","MatFactorGetSolverPackage_mpidense_plapack",MatFactorGetSolverPackage_mpidense_plapack);
1427: return(0);
1428: }
1430: #endif
1434: PetscErrorCode MatGetFactor_mpidense_petsc(Mat A,MatFactorType ftype,Mat *F)
1435: {
1436: #if defined(PETSC_HAVE_PLAPACK)
1438: #endif
1441: #if defined(PETSC_HAVE_PLAPACK)
1442: MatGetFactor_mpidense_plapack(A,ftype,F);
1443: #else
1444: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix format %s uses PLAPACK direct solver. Install PLAPACK",((PetscObject)A)->type_name);
1445: #endif
1446: return(0);
1447: }
1451: PetscErrorCode MatAXPY_MPIDense(Mat Y,PetscScalar alpha,Mat X,MatStructure str)
1452: {
1454: Mat_MPIDense *A = (Mat_MPIDense*)Y->data, *B = (Mat_MPIDense*)X->data;
1457: MatAXPY(A->A,alpha,B->A,str);
1458: return(0);
1459: }
1463: PetscErrorCode MatConjugate_MPIDense(Mat mat)
1464: {
1465: Mat_MPIDense *a = (Mat_MPIDense *)mat->data;
1469: MatConjugate(a->A);
1470: return(0);
1471: }
1475: PetscErrorCode MatRealPart_MPIDense(Mat A)
1476: {
1477: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
1481: MatRealPart(a->A);
1482: return(0);
1483: }
1487: PetscErrorCode MatImaginaryPart_MPIDense(Mat A)
1488: {
1489: Mat_MPIDense *a = (Mat_MPIDense*)A->data;
1493: MatImaginaryPart(a->A);
1494: return(0);
1495: }
1500: PetscErrorCode MatGetColumnNorms_MPIDense(Mat A,NormType type,PetscReal *norms)
1501: {
1503: PetscInt i,n;
1504: Mat_MPIDense *a = (Mat_MPIDense*) A->data;
1505: PetscReal *work;
1508: MatGetSize(A,PETSC_NULL,&n);
1509: PetscMalloc(n*sizeof(PetscReal),&work);
1510: MatGetColumnNorms_SeqDense(a->A,type,work);
1511: if (type == NORM_2) {
1512: for (i=0; i<n; i++) work[i] *= work[i];
1513: }
1514: if (type == NORM_INFINITY) {
1515: MPI_Allreduce(work,norms,n,MPIU_REAL,MPIU_MAX,A->hdr.comm);
1516: } else {
1517: MPI_Allreduce(work,norms,n,MPIU_REAL,MPIU_SUM,A->hdr.comm);
1518: }
1519: PetscFree(work);
1520: if (type == NORM_2) {
1521: for (i=0; i<n; i++) norms[i] = PetscSqrtReal(norms[i]);
1522: }
1523: return(0);
1524: }
1526: /* -------------------------------------------------------------------*/
1527: static struct _MatOps MatOps_Values = {MatSetValues_MPIDense,
1528: MatGetRow_MPIDense,
1529: MatRestoreRow_MPIDense,
1530: MatMult_MPIDense,
1531: /* 4*/ MatMultAdd_MPIDense,
1532: MatMultTranspose_MPIDense,
1533: MatMultTransposeAdd_MPIDense,
1534: 0,
1535: 0,
1536: 0,
1537: /*10*/ 0,
1538: 0,
1539: 0,
1540: 0,
1541: MatTranspose_MPIDense,
1542: /*15*/ MatGetInfo_MPIDense,
1543: MatEqual_MPIDense,
1544: MatGetDiagonal_MPIDense,
1545: MatDiagonalScale_MPIDense,
1546: MatNorm_MPIDense,
1547: /*20*/ MatAssemblyBegin_MPIDense,
1548: MatAssemblyEnd_MPIDense,
1549: MatSetOption_MPIDense,
1550: MatZeroEntries_MPIDense,
1551: /*24*/ MatZeroRows_MPIDense,
1552: 0,
1553: 0,
1554: 0,
1555: 0,
1556: /*29*/ MatSetUpPreallocation_MPIDense,
1557: 0,
1558: 0,
1559: MatGetArray_MPIDense,
1560: MatRestoreArray_MPIDense,
1561: /*34*/ MatDuplicate_MPIDense,
1562: 0,
1563: 0,
1564: 0,
1565: 0,
1566: /*39*/ MatAXPY_MPIDense,
1567: MatGetSubMatrices_MPIDense,
1568: 0,
1569: MatGetValues_MPIDense,
1570: 0,
1571: /*44*/ 0,
1572: MatScale_MPIDense,
1573: 0,
1574: 0,
1575: 0,
1576: /*49*/ 0,
1577: 0,
1578: 0,
1579: 0,
1580: 0,
1581: /*54*/ 0,
1582: 0,
1583: 0,
1584: 0,
1585: 0,
1586: /*59*/ MatGetSubMatrix_MPIDense,
1587: MatDestroy_MPIDense,
1588: MatView_MPIDense,
1589: 0,
1590: 0,
1591: /*64*/ 0,
1592: 0,
1593: 0,
1594: 0,
1595: 0,
1596: /*69*/ 0,
1597: 0,
1598: 0,
1599: 0,
1600: 0,
1601: /*74*/ 0,
1602: 0,
1603: 0,
1604: 0,
1605: 0,
1606: /*79*/ 0,
1607: 0,
1608: 0,
1609: 0,
1610: /*83*/ MatLoad_MPIDense,
1611: 0,
1612: 0,
1613: 0,
1614: 0,
1615: 0,
1616: /*89*/
1617: #if defined(PETSC_HAVE_PLAPACK)
1618: MatMatMult_MPIDense_MPIDense,
1619: MatMatMultSymbolic_MPIDense_MPIDense,
1620: MatMatMultNumeric_MPIDense_MPIDense,
1621: #else
1622: 0,
1623: 0,
1624: 0,
1625: #endif
1626: 0,
1627: 0,
1628: /*94*/ 0,
1629: 0,
1630: 0,
1631: 0,
1632: 0,
1633: /*99*/ 0,
1634: 0,
1635: 0,
1636: MatConjugate_MPIDense,
1637: 0,
1638: /*104*/0,
1639: MatRealPart_MPIDense,
1640: MatImaginaryPart_MPIDense,
1641: 0,
1642: 0,
1643: /*109*/0,
1644: 0,
1645: 0,
1646: 0,
1647: 0,
1648: /*114*/0,
1649: 0,
1650: 0,
1651: 0,
1652: 0,
1653: /*119*/0,
1654: 0,
1655: 0,
1656: 0,
1657: 0,
1658: /*124*/0,
1659: MatGetColumnNorms_MPIDense
1660: };
1665: PetscErrorCode MatMPIDenseSetPreallocation_MPIDense(Mat mat,PetscScalar *data)
1666: {
1667: Mat_MPIDense *a;
1671: mat->preallocated = PETSC_TRUE;
1672: /* Note: For now, when data is specified above, this assumes the user correctly
1673: allocates the local dense storage space. We should add error checking. */
1675: a = (Mat_MPIDense*)mat->data;
1676: PetscLayoutSetBlockSize(mat->rmap,1);
1677: PetscLayoutSetBlockSize(mat->cmap,1);
1678: PetscLayoutSetUp(mat->rmap);
1679: PetscLayoutSetUp(mat->cmap);
1680: a->nvec = mat->cmap->n;
1682: MatCreate(PETSC_COMM_SELF,&a->A);
1683: MatSetSizes(a->A,mat->rmap->n,mat->cmap->N,mat->rmap->n,mat->cmap->N);
1684: MatSetType(a->A,MATSEQDENSE);
1685: MatSeqDenseSetPreallocation(a->A,data);
1686: PetscLogObjectParent(mat,a->A);
1687: return(0);
1688: }
1691: /*MC
1692:   MATSOLVERPLAPACK = "plapack" - Parallel LU and Cholesky factorization for MATMPIDENSE matrices
1694:   Run ./configure with the option --download-plapack to enable this solver.
1697: Options Database Keys:
1698: +  -mat_plapack_nprows <n> - number of rows in processor partition
1699: . -mat_plapack_npcols <n> - number of columns in processor partition
1700: . -mat_plapack_nb <n> - block size of template vector
1701: . -mat_plapack_nb_alg <n> - algorithmic block size
1702: - -mat_plapack_ckerror <n> - error checking flag
1704: Level: intermediate
1706: .seealso: MatCreateMPIDense(), MATDENSE, MATSEQDENSE, PCFactorSetSolverPackage(), MatSolverPackage
1708: M*/
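/*
   Editorial note: a hedged usage sketch, not part of the original source.  With PLAPACK
   installed, the factorizations documented above are typically reached through PCLU or
   PCCHOLESKY; the KSP object "ksp" is an assumption.

     PC pc;
     KSPGetPC(ksp,&pc);
     PCSetType(pc,PCLU);
     PCFactorSetMatSolverPackage(pc,MATSOLVERPLAPACK);

   or, equivalently, from the command line:

     -pc_type lu -pc_factor_mat_solver_package plapack -mat_plapack_nb 64
*/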
1713: PetscErrorCode MatCreate_MPIDense(Mat mat)
1714: {
1715: Mat_MPIDense *a;
1719: PetscNewLog(mat,Mat_MPIDense,&a);
1720: mat->data = (void*)a;
1721: PetscMemcpy(mat->ops,&MatOps_Values,sizeof(struct _MatOps));
1723: mat->insertmode = NOT_SET_VALUES;
1724: MPI_Comm_rank(((PetscObject)mat)->comm,&a->rank);
1725: MPI_Comm_size(((PetscObject)mat)->comm,&a->size);
1727: /* build cache for off array entries formed */
1728: a->donotstash = PETSC_FALSE;
1729: MatStashCreate_Private(((PetscObject)mat)->comm,1,&mat->stash);
1731: /* stuff used for matrix vector multiply */
1732: a->lvec = 0;
1733: a->Mvctx = 0;
1734: a->roworiented = PETSC_TRUE;
1736: PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatGetDiagonalBlock_C",
1737: "MatGetDiagonalBlock_MPIDense",
1738: MatGetDiagonalBlock_MPIDense);
1739: PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMPIDenseSetPreallocation_C",
1740: "MatMPIDenseSetPreallocation_MPIDense",
1741: MatMPIDenseSetPreallocation_MPIDense);
1742: PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMatMult_mpiaij_mpidense_C",
1743: "MatMatMult_MPIAIJ_MPIDense",
1744: MatMatMult_MPIAIJ_MPIDense);
1745: PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMatMultSymbolic_mpiaij_mpidense_C",
1746: "MatMatMultSymbolic_MPIAIJ_MPIDense",
1747: MatMatMultSymbolic_MPIAIJ_MPIDense);
1748: PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMatMultNumeric_mpiaij_mpidense_C",
1749: "MatMatMultNumeric_MPIAIJ_MPIDense",
1750: MatMatMultNumeric_MPIAIJ_MPIDense);
1751: PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatGetFactor_petsc_C",
1752: "MatGetFactor_mpidense_petsc",
1753: MatGetFactor_mpidense_petsc);
1754: #if defined(PETSC_HAVE_PLAPACK)
1755: PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatGetFactor_plapack_C",
1756: "MatGetFactor_mpidense_plapack",
1757: MatGetFactor_mpidense_plapack);
1758: PetscPLAPACKInitializePackage(((PetscObject)mat)->comm);
1759: #endif
1760: PetscObjectChangeTypeName((PetscObject)mat,MATMPIDENSE);
1762: return(0);
1763: }
1766: /*MC
1767: MATDENSE - MATDENSE = "dense" - A matrix type to be used for dense matrices.
1769: This matrix type is identical to MATSEQDENSE when constructed with a single process communicator,
1770: and MATMPIDENSE otherwise.
1772: Options Database Keys:
1773: . -mat_type dense - sets the matrix type to "dense" during a call to MatSetFromOptions()
1775: Level: beginner
1778: .seealso: MatCreateMPIDense,MATSEQDENSE,MATMPIDENSE
1779: M*/
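/*
   Editorial note: a minimal sketch, not part of the original source.  It lets the options
   database pick the dense type described above, which resolves to MATSEQDENSE or
   MATMPIDENSE depending on the communicator size; the 50x50 size is illustrative.

     Mat A;
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,50,50);
     MatSetFromOptions(A);                       run with:  -mat_type dense
     MatSetValue(A,0,0,1.0,INSERT_VALUES);
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
*/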
1783: /*@C
1784: MatMPIDenseSetPreallocation - Sets the array used to store the matrix entries
1786: Not collective
1788: Input Parameters:
1789: +  A - the matrix
1790: - data - optional location of matrix data. Set data=PETSC_NULL for PETSc
1791: to control all matrix memory allocation.
1793: Notes:
1794: The dense format is fully compatible with standard Fortran 77
1795: storage by columns.
1797: The data input variable is intended primarily for Fortran programmers
1798: who wish to allocate their own matrix memory space. Most users should
1799: set data=PETSC_NULL.
1801: Level: intermediate
1803: .keywords: matrix,dense, parallel
1805: .seealso: MatCreate(), MatCreateSeqDense(), MatSetValues()
1806: @*/
1807: PetscErrorCode MatMPIDenseSetPreallocation(Mat mat,PetscScalar *data)
1808: {
1812: PetscTryMethod(mat,"MatMPIDenseSetPreallocation_C",(Mat,PetscScalar *),(mat,data));
1813: return(0);
1814: }
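/*
   Editorial note: a minimal sketch, not part of the original source, of the calling
   sequence this routine belongs to when PETSc manages the storage (data = PETSC_NULL);
   the 100x100 global size is illustrative.

     Mat A;
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);
     MatSetType(A,MATMPIDENSE);
     MatMPIDenseSetPreallocation(A,PETSC_NULL);
     ... MatSetValues() / MatAssemblyBegin() / MatAssemblyEnd() ...
*/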
1818: /*@C
1819:    MatCreateMPIDense - Creates a parallel matrix in dense format.
1821: Collective on MPI_Comm
1823: Input Parameters:
1824: + comm - MPI communicator
1825: .  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
1826: .  n - number of local columns (or PETSC_DECIDE to have it calculated if N is given)
1827: .  M - number of global rows (or PETSC_DECIDE to have it calculated if m is given)
1828: .  N - number of global columns (or PETSC_DECIDE to have it calculated if n is given)
1829: - data - optional location of matrix data. Set data=PETSC_NULL (PETSC_NULL_SCALAR for Fortran users) for PETSc
1830: to control all matrix memory allocation.
1832: Output Parameter:
1833: . A - the matrix
1835: Notes:
1836: The dense format is fully compatible with standard Fortran 77
1837: storage by columns.
1839: The data input variable is intended primarily for Fortran programmers
1840: who wish to allocate their own matrix memory space. Most users should
1841: set data=PETSC_NULL (PETSC_NULL_SCALAR for Fortran users).
1843: The user MUST specify either the local or global matrix dimensions
1844: (possibly both).
1846: Level: intermediate
1848: .keywords: matrix,dense, parallel
1850: .seealso: MatCreate(), MatCreateSeqDense(), MatSetValues()
1851: @*/
1852: PetscErrorCode MatCreateMPIDense(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscScalar *data,Mat *A)
1853: {
1855: PetscMPIInt size;
1858: MatCreate(comm,A);
1859: MatSetSizes(*A,m,n,M,N);
1860: MPI_Comm_size(comm,&size);
1861: if (size > 1) {
1862: MatSetType(*A,MATMPIDENSE);
1863: MatMPIDenseSetPreallocation(*A,data);
1864: } else {
1865: MatSetType(*A,MATSEQDENSE);
1866: MatSeqDenseSetPreallocation(*A,data);
1867: }
1868: return(0);
1869: }
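/*
   Editorial note: a hedged sketch, not part of the original source.  The convenience
   routine above wraps the MatCreate()/MatSetType()/Mat*DenseSetPreallocation() sequence;
   here PETSc allocates the storage and the 100x100 global size is illustrative.

     Mat A;
     MatCreateMPIDense(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,100,100,PETSC_NULL,&A);
     MatSetValue(A,0,0,1.0,INSERT_VALUES);
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
     MatDestroy(&A);
*/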
1873: static PetscErrorCode MatDuplicate_MPIDense(Mat A,MatDuplicateOption cpvalues,Mat *newmat)
1874: {
1875: Mat mat;
1876: Mat_MPIDense *a,*oldmat = (Mat_MPIDense*)A->data;
1880: *newmat = 0;
1881: MatCreate(((PetscObject)A)->comm,&mat);
1882: MatSetSizes(mat,A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N);
1883: MatSetType(mat,((PetscObject)A)->type_name);
1884: a = (Mat_MPIDense*)mat->data;
1885: PetscMemcpy(mat->ops,A->ops,sizeof(struct _MatOps));
1887: mat->factortype = A->factortype;
1888: mat->assembled = PETSC_TRUE;
1889: mat->preallocated = PETSC_TRUE;
1891: a->size = oldmat->size;
1892: a->rank = oldmat->rank;
1893: mat->insertmode = NOT_SET_VALUES;
1894: a->nvec = oldmat->nvec;
1895: a->donotstash = oldmat->donotstash;
1897: PetscLayoutReference(A->rmap,&mat->rmap);
1898: PetscLayoutReference(A->cmap,&mat->cmap);
1900: MatSetUpMultiply_MPIDense(mat);
1901: MatDuplicate(oldmat->A,cpvalues,&a->A);
1902: PetscLogObjectParent(mat,a->A);
1904: *newmat = mat;
1905: return(0);
1906: }
1910: PetscErrorCode MatLoad_MPIDense_DenseInFile(MPI_Comm comm,PetscInt fd,PetscInt M,PetscInt N,Mat newmat,PetscInt sizesset)
1911: {
1913: PetscMPIInt rank,size;
1914: PetscInt *rowners,i,m,nz,j;
1915: PetscScalar *array,*vals,*vals_ptr;
1918: MPI_Comm_rank(comm,&rank);
1919: MPI_Comm_size(comm,&size);
1921: /* determine ownership of all rows */
1922: if (newmat->rmap->n < 0) m = M/size + ((M % size) > rank);
1923: else m = newmat->rmap->n;
1924: PetscMalloc((size+2)*sizeof(PetscInt),&rowners);
1925: MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);
1926: rowners[0] = 0;
1927: for (i=2; i<=size; i++) {
1928: rowners[i] += rowners[i-1];
1929: }
1931: if (!sizesset) {
1932: MatSetSizes(newmat,m,PETSC_DECIDE,M,N);
1933: }
1934: MatMPIDenseSetPreallocation(newmat,PETSC_NULL);
1935: MatGetArray(newmat,&array);
1937: if (!rank) {
1938: PetscMalloc(m*N*sizeof(PetscScalar),&vals);
1940: /* read in my part of the matrix numerical values */
1941: PetscBinaryRead(fd,vals,m*N,PETSC_SCALAR);
1942:
1943:     /* insert into the matrix row by row (this is why we cannot read directly into the array) */
1944: vals_ptr = vals;
1945: for (i=0; i<m; i++) {
1946: for (j=0; j<N; j++) {
1947: array[i + j*m] = *vals_ptr++;
1948: }
1949: }
1951: /* read in other processors and ship out */
1952: for (i=1; i<size; i++) {
1953: nz = (rowners[i+1] - rowners[i])*N;
1954: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
1955: MPILong_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)(newmat))->tag,comm);
1956: }
1957: } else {
1958: /* receive numeric values */
1959: PetscMalloc(m*N*sizeof(PetscScalar),&vals);
1961: /* receive message of values*/
1962: MPILong_Recv(vals,m*N,MPIU_SCALAR,0,((PetscObject)(newmat))->tag,comm);
1964: /* insert into the matrix row by row; the local array is stored column major, which is why we cannot receive directly into it */
1965: vals_ptr = vals;
1966: for (i=0; i<m; i++) {
1967: for (j=0; j<N; j++) {
1968: array[i + j*m] = *vals_ptr++;
1969: }
1970: }
1971: }
1972: PetscFree(rowners);
1973: PetscFree(vals);
1974: MatAssemblyBegin(newmat,MAT_FINAL_ASSEMBLY);
1975: MatAssemblyEnd(newmat,MAT_FINAL_ASSEMBLY);
1976: return(0);
1977: }
1981: PetscErrorCode MatLoad_MPIDense(Mat newmat,PetscViewer viewer)
1982: {
1983: PetscScalar *vals,*svals;
1984: MPI_Comm comm = ((PetscObject)viewer)->comm;
1985: MPI_Status status;
1986: PetscMPIInt rank,size,tag = ((PetscObject)viewer)->tag,*rowners,*sndcounts,m,maxnz;
1987: PetscInt header[4],*rowlengths = 0,M,N,*cols;
1988: PetscInt *ourlens,*procsnz = 0,*offlens,jj,*mycols,*smycols;
1989: PetscInt i,nz,j,rstart,rend,sizesset=1,grows,gcols;
1990: int fd;
1994: MPI_Comm_size(comm,&size);
1995: MPI_Comm_rank(comm,&rank);
1996: if (!rank) {
1997: PetscViewerBinaryGetDescriptor(viewer,&fd);
1998: PetscBinaryRead(fd,(char *)header,4,PETSC_INT);
1999: if (header[0] != MAT_FILE_CLASSID) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
2000: }
2001: if (newmat->rmap->n < 0 && newmat->rmap->N < 0 && newmat->cmap->n < 0 && newmat->cmap->N < 0) sizesset = 0;
2003: MPI_Bcast(header+1,3,MPIU_INT,0,comm);
2004: M = header[1]; N = header[2]; nz = header[3];
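  /* header[0] is MAT_FILE_CLASSID (checked above); header[1] and header[2] are the global row and column counts;
     header[3] is the nonzero count, or MATRIX_BINARY_FORMAT_DENSE when the values are stored as a dense matrix */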
2006: /* If global rows/cols are set to PETSC_DECIDE, set them to the sizes given in the file */
2007: if (sizesset && newmat->rmap->N < 0) newmat->rmap->N = M;
2008: if (sizesset && newmat->cmap->N < 0) newmat->cmap->N = N;
2009:
2010: /* If global sizes are set, check that they are consistent with those given in the file */
2011: if (sizesset) {
2012: MatGetSize(newmat,&grows,&gcols);
2013: }
2014: if (sizesset && M != grows) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Inconsistent # of rows: Matrix in file has (%d) and input matrix has (%d)",M,grows);
2015: if (sizesset && N != gcols) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Inconsistent # of cols: Matrix in file has (%d) and input matrix has (%d)",N,gcols);
2017: /*
2018: Handle case where matrix is stored on disk as a dense matrix
2019: */
2020: if (nz == MATRIX_BINARY_FORMAT_DENSE) {
2021: MatLoad_MPIDense_DenseInFile(comm,fd,M,N,newmat,sizesset);
2022: return(0);
2023: }
2025: /* determine ownership of all rows */
2026: if (newmat->rmap->n < 0) m = PetscMPIIntCast(M/size + ((M % size) > rank));
2027: else m = PetscMPIIntCast(newmat->rmap->n);
2028: PetscMalloc((size+2)*sizeof(PetscMPIInt),&rowners);
2029: MPI_Allgather(&m,1,MPI_INT,rowners+1,1,MPI_INT,comm);
2030: rowners[0] = 0;
2031: for (i=2; i<=size; i++) {
2032: rowners[i] += rowners[i-1];
2033: }
2034: rstart = rowners[rank];
2035: rend = rowners[rank+1];
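  /* this process owns global rows rstart through rend-1 */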
2037: /* distribute row lengths to all processors */
2038: PetscMalloc2(rend-rstart,PetscInt,&ourlens,rend-rstart,PetscInt,&offlens);
2039: if (!rank) {
2040: PetscMalloc(M*sizeof(PetscInt),&rowlengths);
2041: PetscBinaryRead(fd,rowlengths,M,PETSC_INT);
2042: PetscMalloc(size*sizeof(PetscMPIInt),&sndcounts);
2043: for (i=0; i<size; i++) sndcounts[i] = rowners[i+1] - rowners[i];
2044: MPI_Scatterv(rowlengths,sndcounts,rowners,MPIU_INT,ourlens,rend-rstart,MPIU_INT,0,comm);
2045: PetscFree(sndcounts);
2046: } else {
2047: MPI_Scatterv(0,0,0,MPIU_INT,ourlens,rend-rstart,MPIU_INT,0,comm);
2048: }
2050: if (!rank) {
2051: /* calculate the number of nonzeros on each processor */
2052: PetscMalloc(size*sizeof(PetscInt),&procsnz);
2053: PetscMemzero(procsnz,size*sizeof(PetscInt));
2054: for (i=0; i<size; i++) {
2055: for (j=rowners[i]; j< rowners[i+1]; j++) {
2056: procsnz[i] += rowlengths[j];
2057: }
2058: }
2059: PetscFree(rowlengths);
2061: /* determine max buffer needed and allocate it */
2062: maxnz = 0;
2063: for (i=0; i<size; i++) {
2064: maxnz = PetscMax(maxnz,procsnz[i]);
2065: }
2066: PetscMalloc(maxnz*sizeof(PetscInt),&cols);
2068: /* read in my part of the matrix column indices */
2069: nz = procsnz[0];
2070: PetscMalloc(nz*sizeof(PetscInt),&mycols);
2071: PetscBinaryRead(fd,mycols,nz,PETSC_INT);
2073: /* read in everyone else's parts and ship them off */
2074: for (i=1; i<size; i++) {
2075: nz = procsnz[i];
2076: PetscBinaryRead(fd,cols,nz,PETSC_INT);
2077: MPI_Send(cols,nz,MPIU_INT,i,tag,comm);
2078: }
2079: PetscFree(cols);
2080: } else {
2081: /* determine buffer space needed for message */
2082: nz = 0;
2083: for (i=0; i<m; i++) {
2084: nz += ourlens[i];
2085: }
2086: PetscMalloc((nz+1)*sizeof(PetscInt),&mycols);
2088: /* receive message of column indices */
2089: MPI_Recv(mycols,nz,MPIU_INT,0,tag,comm,&status);
2090: MPI_Get_count(&status,MPIU_INT,&maxnz);
2091: if (maxnz != nz) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"something is wrong with the file");
2092: }
2094: /* loop over the local rows, determining the number of off-diagonal entries */
2095: PetscMemzero(offlens,m*sizeof(PetscInt));
2096: jj = 0;
2097: for (i=0; i<m; i++) {
2098: for (j=0; j<ourlens[i]; j++) {
2099: if (mycols[jj] < rstart || mycols[jj] >= rend) offlens[i]++;
2100: jj++;
2101: }
2102: }
2104: /* create our matrix; ourlens[] temporarily holds only the diagonal-block row lengths while the matrix is created */
2105: for (i=0; i<m; i++) {
2106: ourlens[i] -= offlens[i];
2107: }
2109: if (!sizesset) {
2110: MatSetSizes(newmat,m,PETSC_DECIDE,M,N);
2111: }
2112: MatMPIDenseSetPreallocation(newmat,PETSC_NULL);
2113: for (i=0; i<m; i++) {
2114: ourlens[i] += offlens[i];
2115: }
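  /* ourlens[] is now restored to the full row lengths (diagonal plus off-diagonal) for the MatSetValues() calls below */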
2117: if (!rank) {
2118: PetscMalloc(maxnz*sizeof(PetscScalar),&vals);
2120: /* read in my part of the matrix numerical values */
2121: nz = procsnz[0];
2122: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2123:
2124: /* insert into matrix */
2125: jj = rstart;
2126: smycols = mycols;
2127: svals = vals;
2128: for (i=0; i<m; i++) {
2129: MatSetValues(newmat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
2130: smycols += ourlens[i];
2131: svals += ourlens[i];
2132: jj++;
2133: }
2135: /* read in the other processors' parts and ship them out */
2136: for (i=1; i<size; i++) {
2137: nz = procsnz[i];
2138: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2139: MPI_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)newmat)->tag,comm);
2140: }
2141: PetscFree(procsnz);
2142: } else {
2143: /* receive numeric values */
2144: PetscMalloc((nz+1)*sizeof(PetscScalar),&vals);
2146: /* receive message of values */
2147: MPI_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)newmat)->tag,comm,&status);
2148: MPI_Get_count(&status,MPIU_SCALAR,&maxnz);
2149: if (maxnz != nz) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"something is wrong with the file");
2151: /* insert into matrix */
2152: jj = rstart;
2153: smycols = mycols;
2154: svals = vals;
2155: for (i=0; i<m; i++) {
2156: MatSetValues(newmat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);
2157: smycols += ourlens[i];
2158: svals += ourlens[i];
2159: jj++;
2160: }
2161: }
2162: PetscFree2(ourlens,offlens);
2163: PetscFree(vals);
2164: PetscFree(mycols);
2165: PetscFree(rowners);
2167: MatAssemblyBegin(newmat,MAT_FINAL_ASSEMBLY);
2168: MatAssemblyEnd(newmat,MAT_FINAL_ASSEMBLY);
2169: return(0);
2170: }
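/*
   A minimal usage sketch (illustrative only, not part of the original mpidense.c):
   read an MPIDENSE matrix from a PETSc binary file via MatLoad(), which dispatches
   to MatLoad_MPIDense() above. The file name "matrix.dat" and the function name are
   hypothetical.
*/
static PetscErrorCode MatLoadMPIDenseUsage_Sketch(MPI_Comm comm)
{
  Mat         A;
  PetscViewer viewer;

  PetscViewerBinaryOpen(comm,"matrix.dat",FILE_MODE_READ,&viewer);
  MatCreate(comm,&A);
  MatSetType(A,MATMPIDENSE);
  MatLoad(A,viewer);
  PetscViewerDestroy(&viewer);
  MatDestroy(&A);
  return(0);
}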
2174: PetscErrorCode MatEqual_MPIDense(Mat A,Mat B,PetscBool *flag)
2175: {
2176: Mat_MPIDense *matB = (Mat_MPIDense*)B->data,*matA = (Mat_MPIDense*)A->data;
2177: Mat a,b;
2178: PetscBool flg;
2182: a = matA->A;
2183: b = matB->A;
2184: MatEqual(a,b,&flg);
2185: MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,((PetscObject)A)->comm);
2186: return(0);
2187: }
2189: #if defined(PETSC_HAVE_PLAPACK)
2193: /*@C
2194: PetscPLAPACKFinalizePackage - This function destroys everything in the Petsc interface to PLAPACK.
2195: Level: developer
2197: .keywords: Petsc, destroy, package, PLAPACK
2198: .seealso: PetscFinalize()
2199: @*/
2200: PetscErrorCode PetscPLAPACKFinalizePackage(void)
2201: {
2205: PLA_Finalize();
2206: return(0);
2207: }
2211: /*@C
2212: PetscPLAPACKInitializePackage - This function initializes everything in the Petsc interface to PLAPACK. It is
2213: called from MatCreate_MPIDense() the first time an MPI dense matrix is created.
2215: Input Parameter:
2216: . comm - the communicator the matrix lives on
2218: Level: developer
2220: Notes: PLAPACK does not fit well with MPI communicators; all (parallel) PLAPACK objects have to live in the
2221: same communicator (because there is some global state that is initialized and used for all matrices). In addition,
2222: if PLAPACK is initialized (that is, the initial matrices are created) on subcommunicators of MPI_COMM_WORLD, these
2223: subcommunicators cannot overlap.
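 Options Database Keys (registered by this routine):
+ -mat_plapack_nprows <n> - row dimension of the 2D processor mesh
. -mat_plapack_npcols <n> - column dimension of the 2D processor mesh
. -mat_plapack_ckerror <n> - error checking flag
- -mat_plapack_nb_alg <n> - algorithmic block size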
2225: .keywords: Petsc, initialize, package, PLAPACK
2226: .seealso: PetscSysInitializePackage(), PetscInitialize()
2227: @*/
2228: PetscErrorCode PetscPLAPACKInitializePackage(MPI_Comm comm)
2229: {
2230: PetscMPIInt size;
2234: if (!PLA_Initialized(PETSC_NULL)) {
2236: MPI_Comm_size(comm,&size);
2237: Plapack_nprows = 1;
2238: Plapack_npcols = size;
2239:
2240: PetscOptionsBegin(comm,PETSC_NULL,"PLAPACK Options","Mat");
2241: PetscOptionsInt("-mat_plapack_nprows","row dimension of 2D processor mesh","None",Plapack_nprows,&Plapack_nprows,PETSC_NULL);
2242: PetscOptionsInt("-mat_plapack_npcols","column dimension of 2D processor mesh","None",Plapack_npcols,&Plapack_npcols,PETSC_NULL);
2243: #if defined(PETSC_USE_DEBUG)
2244: Plapack_ierror = 3;
2245: #else
2246: Plapack_ierror = 0;
2247: #endif
2248: PetscOptionsInt("-mat_plapack_ckerror","error checking flag","None",Plapack_ierror,&Plapack_ierror,PETSC_NULL);
2249: if (Plapack_ierror){
2250: PLA_Set_error_checking(Plapack_ierror,PETSC_TRUE,PETSC_TRUE,PETSC_FALSE );
2251: } else {
2252: PLA_Set_error_checking(Plapack_ierror,PETSC_FALSE,PETSC_FALSE,PETSC_FALSE );
2253: }
2254:
2255: Plapack_nb_alg = 0;
2256: PetscOptionsInt("-mat_plapack_nb_alg","algorithmic block size","None",Plapack_nb_alg,&Plapack_nb_alg,PETSC_NULL);
2257: if (Plapack_nb_alg) {
2258: pla_Environ_set_nb_alg (PLA_OP_ALL_ALG,Plapack_nb_alg);
2259: }
2260: PetscOptionsEnd();
2262: PLA_Comm_1D_to_2D(comm,Plapack_nprows,Plapack_npcols,&Plapack_comm_2d);
2263: PLA_Init(Plapack_comm_2d);
2264: PetscRegisterFinalize(PetscPLAPACKFinalizePackage);
2265: }
2266: return(0);
2267: }
2269: #endif