Actual source code: mpisbaij.c
2: #include <../src/mat/impls/baij/mpi/mpibaij.h> /*I "petscmat.h" I*/
3: #include <../src/mat/impls/sbaij/mpi/mpisbaij.h>
4: #include <../src/mat/impls/sbaij/seq/sbaij.h>
5: #include <petscblaslapack.h>
26: PetscErrorCode MatStoreValues_MPISBAIJ(Mat mat)
27: {
28: Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
32: MatStoreValues(aij->A);
33: MatStoreValues(aij->B);
34: return(0);
35: }
41: PetscErrorCode MatRetrieveValues_MPISBAIJ(Mat mat)
42: {
43: Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
47: MatRetrieveValues(aij->A);
48: MatRetrieveValues(aij->B);
49: return(0);
50: }
54: #define MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv) \
55: { \
56: \
57: brow = row/bs; \
58: rp = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
59: rmax = aimax[brow]; nrow = ailen[brow]; \
60: bcol = col/bs; \
61: ridx = row % bs; cidx = col % bs; \
62: low = 0; high = nrow; \
63: while (high-low > 3) { \
64: t = (low+high)/2; \
65: if (rp[t] > bcol) high = t; \
66: else low = t; \
67: } \
68: for (_i=low; _i<high; _i++) { \
69: if (rp[_i] > bcol) break; \
70: if (rp[_i] == bcol) { \
71: bap = ap + bs2*_i + bs*cidx + ridx; \
72: if (addv == ADD_VALUES) *bap += value; \
73: else *bap = value; \
74: goto a_noinsert; \
75: } \
76: } \
77: if (a->nonew == 1) goto a_noinsert; \
78: if (a->nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
79: MatSeqXAIJReallocateAIJ(A,a->mbs,bs2,nrow,brow,bcol,rmax,aa,ai,aj,rp,ap,aimax,a->nonew,MatScalar); \
80: N = nrow++ - 1; \
81: /* shift up all the later entries in this row */ \
82: for (ii=N; ii>=_i; ii--) { \
83: rp[ii+1] = rp[ii]; \
84: PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
85: } \
86: if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar)); } \
87: rp[_i] = bcol; \
88: ap[bs2*_i + bs*cidx + ridx] = value; \
89: a_noinsert:; \
90: ailen[brow] = nrow; \
91: }
93: #define MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv) \
94: { \
95: brow = row/bs; \
96: rp = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
97: rmax = bimax[brow]; nrow = bilen[brow]; \
98: bcol = col/bs; \
99: ridx = row % bs; cidx = col % bs; \
100: low = 0; high = nrow; \
101: while (high-low > 3) { \
102: t = (low+high)/2; \
103: if (rp[t] > bcol) high = t; \
104: else low = t; \
105: } \
106: for (_i=low; _i<high; _i++) { \
107: if (rp[_i] > bcol) break; \
108: if (rp[_i] == bcol) { \
109: bap = ap + bs2*_i + bs*cidx + ridx; \
110: if (addv == ADD_VALUES) *bap += value; \
111: else *bap = value; \
112: goto b_noinsert; \
113: } \
114: } \
115: if (b->nonew == 1) goto b_noinsert; \
116: if (b->nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
117: MatSeqXAIJReallocateAIJ(B,b->mbs,bs2,nrow,brow,bcol,rmax,ba,bi,bj,rp,ap,bimax,b->nonew,MatScalar); \
118: N = nrow++ - 1; \
119: /* shift up all the later entries in this row */ \
120: for (ii=N; ii>=_i; ii--) { \
121: rp[ii+1] = rp[ii]; \
122: PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
123: } \
124: if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));} \
125: rp[_i] = bcol; \
126: ap[bs2*_i + bs*cidx + ridx] = value; \
127: b_noinsert:; \
128: bilen[brow] = nrow; \
129: }
131: /* Only add/insert a(i,j) with i<=j (blocks).
132: Any a(i,j) with i>j input by the user is ignored.
133: */
136: PetscErrorCode MatSetValues_MPISBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
137: {
138: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
139: MatScalar value;
140: PetscBool roworiented = baij->roworiented;
142: PetscInt i,j,row,col;
143: PetscInt rstart_orig=mat->rmap->rstart;
144: PetscInt rend_orig=mat->rmap->rend,cstart_orig=mat->cmap->rstart;
145: PetscInt cend_orig=mat->cmap->rend,bs=mat->rmap->bs;
147: /* Some Variables required in the macro */
148: Mat A = baij->A;
149: Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ*)(A)->data;
150: PetscInt *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
151: MatScalar *aa=a->a;
153: Mat B = baij->B;
154: Mat_SeqBAIJ *b = (Mat_SeqBAIJ*)(B)->data;
155: PetscInt *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
156: MatScalar *ba=b->a;
158: PetscInt *rp,ii,nrow,_i,rmax,N,brow,bcol;
159: PetscInt low,high,t,ridx,cidx,bs2=a->bs2;
160: MatScalar *ap,*bap;
162: /* for stash */
163: PetscInt n_loc, *in_loc = PETSC_NULL;
164: MatScalar *v_loc = PETSC_NULL;
168: if (!baij->donotstash){
169: if (n > baij->n_loc) {
170: PetscFree(baij->in_loc);
171: PetscFree(baij->v_loc);
172: PetscMalloc(n*sizeof(PetscInt),&baij->in_loc);
173: PetscMalloc(n*sizeof(MatScalar),&baij->v_loc);
174: baij->n_loc = n;
175: }
176: in_loc = baij->in_loc;
177: v_loc = baij->v_loc;
178: }
180: for (i=0; i<m; i++) {
181: if (im[i] < 0) continue;
182: #if defined(PETSC_USE_DEBUG)
183: if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
184: #endif
185: if (im[i] >= rstart_orig && im[i] < rend_orig) { /* this processor entry */
186: row = im[i] - rstart_orig; /* local row index */
187: for (j=0; j<n; j++) {
188: if (im[i]/bs > in[j]/bs){
189: if (a->ignore_ltriangular){
190: continue; /* ignore lower triangular blocks */
191: } else {
192: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_USER,"Lower triangular value cannot be set for sbaij format. To ignore these values, run with -mat_ignore_lower_triangular or call MatSetOption(mat,MAT_IGNORE_LOWER_TRIANGULAR,PETSC_TRUE)");
193: }
194: }
195: if (in[j] >= cstart_orig && in[j] < cend_orig){ /* diag entry (A) */
196: col = in[j] - cstart_orig; /* local col index */
197: brow = row/bs; bcol = col/bs;
198: if (brow > bcol) continue; /* ignore lower triangular blocks of A */
199: if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
200: MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv);
201: /* MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv); */
202: } else if (in[j] < 0) continue;
203: #if defined(PETSC_USE_DEBUG)
204: else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
205: #endif
206: else { /* off-diag entry (B) */
207: if (mat->was_assembled) {
208: if (!baij->colmap) {
209: CreateColmap_MPIBAIJ_Private(mat);
210: }
211: #if defined (PETSC_USE_CTABLE)
212: PetscTableFind(baij->colmap,in[j]/bs + 1,&col);
213: col = col - 1;
214: #else
215: col = baij->colmap[in[j]/bs] - 1;
216: #endif
217: if (col < 0 && !((Mat_SeqSBAIJ*)(baij->A->data))->nonew) {
218: DisAssemble_MPISBAIJ(mat);
219: col = in[j];
220: /* Reinitialize the variables required by MatSetValues_SeqSBAIJ_B_Private() */
221: B = baij->B;
222: b = (Mat_SeqBAIJ*)(B)->data;
223: bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
224: ba=b->a;
225: } else col += in[j]%bs;
226: } else col = in[j];
227: if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
228: MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv);
229: /* MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv); */
230: }
231: }
232: } else { /* off processor entry */
233: if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
234: if (!baij->donotstash) {
235: n_loc = 0;
236: for (j=0; j<n; j++){
237: if (im[i]/bs > in[j]/bs) continue; /* ignore lower triangular blocks */
238: in_loc[n_loc] = in[j];
239: if (roworiented) {
240: v_loc[n_loc] = v[i*n+j];
241: } else {
242: v_loc[n_loc] = v[j*m+i];
243: }
244: n_loc++;
245: }
246: MatStashValuesRow_Private(&mat->stash,im[i],n_loc,in_loc,v_loc,PETSC_FALSE);
247: }
248: }
249: }
250: return(0);
251: }
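A minimal caller-side sketch (not part of this file; communicator, sizes, and indices chosen only for illustration) of how the lower-triangular policy of the routine above appears to a user: entries with i <= j are stored, and with MAT_IGNORE_LOWER_TRIANGULAR set the i > j input is silently dropped instead of raising PETSC_ERR_USER.

#include <petscmat.h>

int main(int argc,char **argv)
{
  Mat            A;
  PetscErrorCode ierr;
  PetscInt       i = 0,j = 1;     /* global indices; i <= j is the stored upper triangle */
  PetscScalar    v = 2.0;

  ierr = PetscInitialize(&argc,&argv,PETSC_NULL,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,4,4);CHKERRQ(ierr);
  ierr = MatSetType(A,MATMPISBAIJ);CHKERRQ(ierr);
  ierr = MatMPISBAIJSetPreallocation(A,1,2,PETSC_NULL,1,PETSC_NULL);CHKERRQ(ierr);
  /* drop lower-triangular input silently instead of erroring */
  ierr = MatSetOption(A,MAT_IGNORE_LOWER_TRIANGULAR,PETSC_TRUE);CHKERRQ(ierr);
  ierr = MatSetValues(A,1,&i,1,&j,&v,INSERT_VALUES);CHKERRQ(ierr);  /* kept: upper triangle    */
  ierr = MatSetValues(A,1,&j,1,&i,&v,INSERT_VALUES);CHKERRQ(ierr);  /* ignored: lower triangle */
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}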
255: PetscErrorCode MatSetValuesBlocked_MPISBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const MatScalar v[],InsertMode addv)
256: {
257: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
258: const MatScalar *value;
259: MatScalar *barray=baij->barray;
260: PetscBool roworiented = baij->roworiented,ignore_ltriangular = ((Mat_SeqSBAIJ*)baij->A->data)->ignore_ltriangular;
261: PetscErrorCode ierr;
262: PetscInt i,j,ii,jj,row,col,rstart=baij->rstartbs;
263: PetscInt rend=baij->rendbs,cstart=baij->rstartbs,stepval;
264: PetscInt cend=baij->rendbs,bs=mat->rmap->bs,bs2=baij->bs2;
267: if(!barray) {
268: PetscMalloc(bs2*sizeof(MatScalar),&barray);
269: baij->barray = barray;
270: }
272: if (roworiented) {
273: stepval = (n-1)*bs;
274: } else {
275: stepval = (m-1)*bs;
276: }
277: for (i=0; i<m; i++) {
278: if (im[i] < 0) continue;
279: #if defined(PETSC_USE_DEBUG)
280: if (im[i] >= baij->Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large, row %D max %D",im[i],baij->Mbs-1);
281: #endif
282: if (im[i] >= rstart && im[i] < rend) {
283: row = im[i] - rstart;
284: for (j=0; j<n; j++) {
285: if (im[i] > in[j]) {
286: if (ignore_ltriangular) continue; /* ignore lower triangular blocks */
287: else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_USER,"Lower triangular value cannot be set for sbaij format. To ignore these values, run with -mat_ignore_lower_triangular or call MatSetOption(mat,MAT_IGNORE_LOWER_TRIANGULAR,PETSC_TRUE)");
288: }
289: /* If NumCol = 1 then a copy is not required */
290: if ((roworiented) && (n == 1)) {
291: barray = (MatScalar*) v + i*bs2;
292: } else if((!roworiented) && (m == 1)) {
293: barray = (MatScalar*) v + j*bs2;
294: } else { /* Here a copy is required */
295: if (roworiented) {
296: value = v + i*(stepval+bs)*bs + j*bs;
297: } else {
298: value = v + j*(stepval+bs)*bs + i*bs;
299: }
300: for (ii=0; ii<bs; ii++,value+=stepval) {
301: for (jj=0; jj<bs; jj++) {
302: *barray++ = *value++;
303: }
304: }
305: barray -=bs2;
306: }
307:
308: if (in[j] >= cstart && in[j] < cend){
309: col = in[j] - cstart;
310: MatSetValuesBlocked_SeqSBAIJ(baij->A,1,&row,1,&col,barray,addv);
311: }
312: else if (in[j] < 0) continue;
313: #if defined(PETSC_USE_DEBUG)
314: else if (in[j] >= baij->Nbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large, col %D max %D",in[j],baij->Nbs-1);
315: #endif
316: else {
317: if (mat->was_assembled) {
318: if (!baij->colmap) {
319: CreateColmap_MPIBAIJ_Private(mat);
320: }
322: #if defined(PETSC_USE_DEBUG)
323: #if defined (PETSC_USE_CTABLE)
324: { PetscInt data;
325: PetscTableFind(baij->colmap,in[j]+1,&data);
326: if ((data - 1) % bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Incorrect colmap");
327: }
328: #else
329: if ((baij->colmap[in[j]] - 1) % bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Incorrect colmap");
330: #endif
331: #endif
332: #if defined (PETSC_USE_CTABLE)
333: PetscTableFind(baij->colmap,in[j]+1,&col);
334: col = (col - 1)/bs;
335: #else
336: col = (baij->colmap[in[j]] - 1)/bs;
337: #endif
338: if (col < 0 && !((Mat_SeqBAIJ*)(baij->A->data))->nonew) {
339: DisAssemble_MPISBAIJ(mat);
340: col = in[j];
341: }
342: }
343: else col = in[j];
344: MatSetValuesBlocked_SeqBAIJ(baij->B,1,&row,1,&col,barray,addv);
345: }
346: }
347: } else {
348: if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
349: if (!baij->donotstash) {
350: if (roworiented) {
351: MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
352: } else {
353: MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
354: }
355: }
356: }
357: }
358: return(0);
359: }
363: PetscErrorCode MatGetValues_MPISBAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
364: {
365: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
367: PetscInt bs=mat->rmap->bs,i,j,bsrstart = mat->rmap->rstart,bsrend = mat->rmap->rend;
368: PetscInt bscstart = mat->cmap->rstart,bscend = mat->cmap->rend,row,col,data;
371: for (i=0; i<m; i++) {
372: if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]); */
373: if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
374: if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
375: row = idxm[i] - bsrstart;
376: for (j=0; j<n; j++) {
377: if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column %D",idxn[j]); */
378: if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
379: if (idxn[j] >= bscstart && idxn[j] < bscend){
380: col = idxn[j] - bscstart;
381: MatGetValues_SeqSBAIJ(baij->A,1,&row,1,&col,v+i*n+j);
382: } else {
383: if (!baij->colmap) {
384: CreateColmap_MPIBAIJ_Private(mat);
385: }
386: #if defined (PETSC_USE_CTABLE)
387: PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);
388: data --;
389: #else
390: data = baij->colmap[idxn[j]/bs]-1;
391: #endif
392: if((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
393: else {
394: col = data + idxn[j]%bs;
395: MatGetValues_SeqBAIJ(baij->B,1,&row,1,&col,v+i*n+j);
396: }
397: }
398: }
399: } else {
400: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
401: }
402: }
403: return(0);
404: }
408: PetscErrorCode MatNorm_MPISBAIJ(Mat mat,NormType type,PetscReal *norm)
409: {
410: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
412: PetscReal sum[2],*lnorm2;
415: if (baij->size == 1) {
416: MatNorm(baij->A,type,norm);
417: } else {
418: if (type == NORM_FROBENIUS) {
419: PetscMalloc(2*sizeof(PetscReal),&lnorm2);
420: MatNorm(baij->A,type,lnorm2);
421: *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2++; /* square of norm(A) */
422: MatNorm(baij->B,type,lnorm2);
423: *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2--; /* square of norm(B) */
424: MPI_Allreduce(lnorm2,&sum,2,MPIU_REAL,MPIU_SUM,((PetscObject)mat)->comm);
425: *norm = PetscSqrtReal(sum[0] + 2*sum[1]); /* B is stored only once; its symmetric counterpart supplies the factor of 2 */
426: PetscFree(lnorm2);
427: } else if (type == NORM_INFINITY || type == NORM_1) { /* max row/column sum */
428: Mat_SeqSBAIJ *amat=(Mat_SeqSBAIJ*)baij->A->data;
429: Mat_SeqBAIJ *bmat=(Mat_SeqBAIJ*)baij->B->data;
430: PetscReal *rsum,*rsum2,vabs;
431: PetscInt *jj,*garray=baij->garray,rstart=baij->rstartbs,nz;
432: PetscInt brow,bcol,col,bs=baij->A->rmap->bs,row,grow,gcol,mbs=amat->mbs;
433: MatScalar *v;
435: PetscMalloc2(mat->cmap->N,PetscReal,&rsum,mat->cmap->N,PetscReal,&rsum2);
436: PetscMemzero(rsum,mat->cmap->N*sizeof(PetscReal));
437: /* Amat */
438: v = amat->a; jj = amat->j;
439: for (brow=0; brow<mbs; brow++) {
440: grow = bs*(rstart + brow);
441: nz = amat->i[brow+1] - amat->i[brow];
442: for (bcol=0; bcol<nz; bcol++){
443: gcol = bs*(rstart + *jj); jj++;
444: for (col=0; col<bs; col++){
445: for (row=0; row<bs; row++){
446: vabs = PetscAbsScalar(*v); v++;
447: rsum[gcol+col] += vabs;
448: /* non-diagonal block */
449: if (bcol > 0 && vabs > 0.0) rsum[grow+row] += vabs;
450: }
451: }
452: }
453: }
454: /* Bmat */
455: v = bmat->a; jj = bmat->j;
456: for (brow=0; brow<mbs; brow++) {
457: grow = bs*(rstart + brow);
458: nz = bmat->i[brow+1] - bmat->i[brow];
459: for (bcol=0; bcol<nz; bcol++){
460: gcol = bs*garray[*jj]; jj++;
461: for (col=0; col<bs; col++){
462: for (row=0; row<bs; row++){
463: vabs = PetscAbsScalar(*v); v++;
464: rsum[gcol+col] += vabs;
465: rsum[grow+row] += vabs;
466: }
467: }
468: }
469: }
470: MPI_Allreduce(rsum,rsum2,mat->cmap->N,MPIU_REAL,MPIU_SUM,((PetscObject)mat)->comm);
471: *norm = 0.0;
472: for (col=0; col<mat->cmap->N; col++) {
473: if (rsum2[col] > *norm) *norm = rsum2[col];
474: }
475: PetscFree2(rsum,rsum2);
476: } else {
477: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"No support for this norm yet");
478: }
479: }
480: return(0);
481: }
485: PetscErrorCode MatAssemblyBegin_MPISBAIJ(Mat mat,MatAssemblyType mode)
486: {
487: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
489: PetscInt nstash,reallocs;
490: InsertMode addv;
493: if (baij->donotstash || mat->nooffprocentries) {
494: return(0);
495: }
497: /* make sure all processors are either in INSERTMODE or ADDMODE */
498: MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,((PetscObject)mat)->comm);
499: if (addv == (ADD_VALUES|INSERT_VALUES)) SETERRQ(((PetscObject)mat)->comm,PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted others added");
500: mat->insertmode = addv; /* in case this processor had no cache */
502: MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);
503: MatStashScatterBegin_Private(mat,&mat->bstash,baij->rangebs);
504: MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
505: PetscInfo2(mat,"Stash has %D entries,uses %D mallocs.\n",nstash,reallocs);
506: MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
507: PetscInfo2(mat,"Block-Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
508: return(0);
509: }
513: PetscErrorCode MatAssemblyEnd_MPISBAIJ(Mat mat,MatAssemblyType mode)
514: {
515: Mat_MPISBAIJ *baij=(Mat_MPISBAIJ*)mat->data;
516: Mat_SeqSBAIJ *a=(Mat_SeqSBAIJ*)baij->A->data;
518: PetscInt i,j,rstart,ncols,flg,bs2=baij->bs2;
519: PetscInt *row,*col;
520: PetscBool other_disassembled;
521: PetscMPIInt n;
522: PetscBool r1,r2,r3;
523: MatScalar *val;
524: InsertMode addv = mat->insertmode;
526: /* do not use 'b=(Mat_SeqBAIJ*)baij->B->data' as B can be reset in disassembly */
529: if (!baij->donotstash && !mat->nooffprocentries) {
530: while (1) {
531: MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
532: if (!flg) break;
534: for (i=0; i<n;) {
535: /* Now identify the consecutive vals belonging to the same row */
536: for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
537: if (j < n) ncols = j-i;
538: else ncols = n-i;
539: /* Now assemble all these values with a single function call */
540: MatSetValues_MPISBAIJ(mat,1,row+i,ncols,col+i,val+i,addv);
541: i = j;
542: }
543: }
544: MatStashScatterEnd_Private(&mat->stash);
545: /* Now process the block-stash. Since the values are stashed column-oriented,
546: set the roworiented flag to column oriented, and after MatSetValues()
547: restore the original flags */
548: r1 = baij->roworiented;
549: r2 = a->roworiented;
550: r3 = ((Mat_SeqBAIJ*)baij->B->data)->roworiented;
551: baij->roworiented = PETSC_FALSE;
552: a->roworiented = PETSC_FALSE;
553: ((Mat_SeqBAIJ*)baij->B->data)->roworiented = PETSC_FALSE; /* b->roworiented */
554: while (1) {
555: MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);
556: if (!flg) break;
557:
558: for (i=0; i<n;) {
559: /* Now identify the consecutive vals belonging to the same row */
560: for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
561: if (j < n) ncols = j-i;
562: else ncols = n-i;
563: MatSetValuesBlocked_MPISBAIJ(mat,1,row+i,ncols,col+i,val+i*bs2,addv);
564: i = j;
565: }
566: }
567: MatStashScatterEnd_Private(&mat->bstash);
568: baij->roworiented = r1;
569: a->roworiented = r2;
570: ((Mat_SeqBAIJ*)baij->B->data)->roworiented = r3; /* b->roworiented */
571: }
573: MatAssemblyBegin(baij->A,mode);
574: MatAssemblyEnd(baij->A,mode);
576: /* determine if any processor has disassembled, if so we must
577: also disassemble ourselves, in order that we may reassemble. */
578: /*
579: if nonzero structure of submatrix B cannot change then we know that
580: no processor disassembled thus we can skip this stuff
581: */
582: if (!((Mat_SeqBAIJ*)baij->B->data)->nonew) {
583: MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,((PetscObject)mat)->comm);
584: if (mat->was_assembled && !other_disassembled) {
585: DisAssemble_MPISBAIJ(mat);
586: }
587: }
589: if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
590: MatSetUpMultiply_MPISBAIJ(mat); /* setup Mvctx and sMvctx */
591: }
592: MatSetOption(baij->B,MAT_CHECK_COMPRESSED_ROW,PETSC_TRUE);
593: MatAssemblyBegin(baij->B,mode);
594: MatAssemblyEnd(baij->B,mode);
595:
596: PetscFree2(baij->rowvalues,baij->rowindices);
597: baij->rowvalues = 0;
599: return(0);
600: }
605: static PetscErrorCode MatView_MPISBAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
606: {
607: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
608: PetscErrorCode ierr;
609: PetscInt bs = mat->rmap->bs;
610: PetscMPIInt size = baij->size,rank = baij->rank;
611: PetscBool iascii,isdraw;
612: PetscViewer sviewer;
613: PetscViewerFormat format;
616: PetscTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
617: PetscTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
618: if (iascii) {
619: PetscViewerGetFormat(viewer,&format);
620: if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
621: MatInfo info;
622: MPI_Comm_rank(((PetscObject)mat)->comm,&rank);
623: MatGetInfo(mat,MAT_LOCAL,&info);
624: PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);
625: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D bs %D mem %D\n",rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,mat->rmap->bs,(PetscInt)info.memory);
626: MatGetInfo(baij->A,MAT_LOCAL,&info);
627: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
628: MatGetInfo(baij->B,MAT_LOCAL,&info);
629: PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);
630: PetscViewerFlush(viewer);
631: PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);
632: PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");
633: VecScatterView(baij->Mvctx,viewer);
634: return(0);
635: } else if (format == PETSC_VIEWER_ASCII_INFO) {
636: PetscViewerASCIIPrintf(viewer," block size is %D\n",bs);
637: return(0);
638: } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
639: return(0);
640: }
641: }
643: if (isdraw) {
644: PetscDraw draw;
645: PetscBool isnull;
646: PetscViewerDrawGetDraw(viewer,0,&draw);
647: PetscDrawIsNull(draw,&isnull); if (isnull) return(0);
648: }
650: if (size == 1) {
651: PetscObjectSetName((PetscObject)baij->A,((PetscObject)mat)->name);
652: MatView(baij->A,viewer);
653: } else {
654: /* assemble the entire matrix onto first processor. */
655: Mat A;
656: Mat_SeqSBAIJ *Aloc;
657: Mat_SeqBAIJ *Bloc;
658: PetscInt M = mat->rmap->N,N = mat->cmap->N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
659: MatScalar *a;
661: /* Should this be the same type as mat? */
662: MatCreate(((PetscObject)mat)->comm,&A);
663: if (!rank) {
664: MatSetSizes(A,M,N,M,N);
665: } else {
666: MatSetSizes(A,0,0,M,N);
667: }
668: MatSetType(A,MATMPISBAIJ);
669: MatMPISBAIJSetPreallocation(A,mat->rmap->bs,0,PETSC_NULL,0,PETSC_NULL);
670: PetscLogObjectParent(mat,A);
672: /* copy over the A part */
673: Aloc = (Mat_SeqSBAIJ*)baij->A->data;
674: ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
675: PetscMalloc(bs*sizeof(PetscInt),&rvals);
677: for (i=0; i<mbs; i++) {
678: rvals[0] = bs*(baij->rstartbs + i);
679: for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
680: for (j=ai[i]; j<ai[i+1]; j++) {
681: col = (baij->cstartbs+aj[j])*bs;
682: for (k=0; k<bs; k++) {
683: MatSetValues_MPISBAIJ(A,bs,rvals,1,&col,a,INSERT_VALUES);
684: col++; a += bs;
685: }
686: }
687: }
688: /* copy over the B part */
689: Bloc = (Mat_SeqBAIJ*)baij->B->data;
690: ai = Bloc->i; aj = Bloc->j; a = Bloc->a;
691: for (i=0; i<mbs; i++) {
692:
693: rvals[0] = bs*(baij->rstartbs + i);
694: for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
695: for (j=ai[i]; j<ai[i+1]; j++) {
696: col = baij->garray[aj[j]]*bs;
697: for (k=0; k<bs; k++) {
698: MatSetValues_MPIBAIJ(A,bs,rvals,1,&col,a,INSERT_VALUES);
699: col++; a += bs;
700: }
701: }
702: }
703: PetscFree(rvals);
704: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
705: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
706: /*
707: Everyone has to call to draw the matrix since the graphics waits are
708: synchronized across all processors that share the PetscDraw object
709: */
710: PetscViewerGetSingleton(viewer,&sviewer);
711: if (!rank) {
712: PetscObjectSetName((PetscObject)((Mat_MPISBAIJ*)(A->data))->A,((PetscObject)mat)->name);
713: /* Set the type name to MATMPISBAIJ so that the correct type can be printed out by PetscObjectPrintClassNamePrefixType() in MatView_SeqSBAIJ_ASCII()*/
714: PetscStrcpy(((PetscObject)((Mat_MPISBAIJ*)(A->data))->A)->type_name,MATMPISBAIJ);
715: MatView(((Mat_MPISBAIJ*)(A->data))->A,sviewer);
716: }
717: PetscViewerRestoreSingleton(viewer,&sviewer);
718: MatDestroy(&A);
719: }
720: return(0);
721: }
725: PetscErrorCode MatView_MPISBAIJ(Mat mat,PetscViewer viewer)
726: {
728: PetscBool iascii,isdraw,issocket,isbinary;
731: PetscTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
732: PetscTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);
733: PetscTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);
734: PetscTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);
735: if (iascii || isdraw || issocket || isbinary) {
736: MatView_MPISBAIJ_ASCIIorDraworSocket(mat,viewer);
737: } else {
738: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Viewer type %s not supported by MPISBAIJ matrices",((PetscObject)viewer)->type_name);
739: }
740: return(0);
741: }
745: PetscErrorCode MatDestroy_MPISBAIJ(Mat mat)
746: {
747: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
751: #if defined(PETSC_USE_LOG)
752: PetscLogObjectState((PetscObject)mat,"Rows=%D,Cols=%D",mat->rmap->N,mat->cmap->N);
753: #endif
754: MatStashDestroy_Private(&mat->stash);
755: MatStashDestroy_Private(&mat->bstash);
756: MatDestroy(&baij->A);
757: MatDestroy(&baij->B);
758: #if defined (PETSC_USE_CTABLE)
759: PetscTableDestroy(&baij->colmap);
760: #else
761: PetscFree(baij->colmap);
762: #endif
763: PetscFree(baij->garray);
764: VecDestroy(&baij->lvec);
765: VecScatterDestroy(&baij->Mvctx);
766: VecDestroy(&baij->slvec0);
767: VecDestroy(&baij->slvec0b);
768: VecDestroy(&baij->slvec1);
769: VecDestroy(&baij->slvec1a);
770: VecDestroy(&baij->slvec1b);
771: VecScatterDestroy(&baij->sMvctx);
772: PetscFree2(baij->rowvalues,baij->rowindices);
773: PetscFree(baij->barray);
774: PetscFree(baij->hd);
775: VecDestroy(&baij->diag);
776: VecDestroy(&baij->bb1);
777: VecDestroy(&baij->xx1);
778: #if defined(PETSC_USE_REAL_MAT_SINGLE)
779: PetscFree(baij->setvaluescopy);
780: #endif
781: PetscFree(baij->in_loc);
782: PetscFree(baij->v_loc);
783: PetscFree(baij->rangebs);
784: PetscFree(mat->data);
786: PetscObjectChangeTypeName((PetscObject)mat,0);
787: PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C","",PETSC_NULL);
788: PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C","",PETSC_NULL);
789: PetscObjectComposeFunction((PetscObject)mat,"MatGetDiagonalBlock_C","",PETSC_NULL);
790: PetscObjectComposeFunction((PetscObject)mat,"MatMPISBAIJSetPreallocation_C","",PETSC_NULL);
791: PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpisbaij_mpisbstrm_C","",PETSC_NULL);
792: return(0);
793: }
797: PetscErrorCode MatMult_MPISBAIJ_Hermitian(Mat A,Vec xx,Vec yy)
798: {
799: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
801: PetscInt nt,mbs=a->mbs,bs=A->rmap->bs;
802: PetscScalar *x,*from;
803:
805: VecGetLocalSize(xx,&nt);
806: if (nt != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
808: /* diagonal part */
809: (*a->A->ops->mult)(a->A,xx,a->slvec1a);
810: VecSet(a->slvec1b,0.0);
812: /* subdiagonal part */
813: (*a->B->ops->multhermitiantranspose)(a->B,xx,a->slvec0b);
815: /* copy x into the vec slvec0 */
816: VecGetArray(a->slvec0,&from);
817: VecGetArray(xx,&x);
819: PetscMemcpy(from,x,bs*mbs*sizeof(MatScalar));
820: VecRestoreArray(a->slvec0,&from);
821: VecRestoreArray(xx,&x);
822:
823: VecScatterBegin(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
824: VecScatterEnd(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
825: /* superdiagonal part */
826: (*a->B->ops->multadd)(a->B,a->slvec1b,a->slvec1a,yy);
827: return(0);
828: }
832: PetscErrorCode MatMult_MPISBAIJ(Mat A,Vec xx,Vec yy)
833: {
834: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
836: PetscInt nt,mbs=a->mbs,bs=A->rmap->bs;
837: PetscScalar *x,*from;
838:
840: VecGetLocalSize(xx,&nt);
841: if (nt != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
843: /* diagonal part */
844: (*a->A->ops->mult)(a->A,xx,a->slvec1a);
845: VecSet(a->slvec1b,0.0);
847: /* subdiagonal part */
848: (*a->B->ops->multtranspose)(a->B,xx,a->slvec0b);
850: /* copy x into the vec slvec0 */
851: VecGetArray(a->slvec0,&from);
852: VecGetArray(xx,&x);
854: PetscMemcpy(from,x,bs*mbs*sizeof(MatScalar));
855: VecRestoreArray(a->slvec0,&from);
856: VecRestoreArray(xx,&x);
857:
858: VecScatterBegin(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
859: VecScatterEnd(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
860: /* superdiagonal part */
861: (*a->B->ops->multadd)(a->B,a->slvec1b,a->slvec1a,yy);
862: return(0);
863: }
867: PetscErrorCode MatMult_MPISBAIJ_2comm(Mat A,Vec xx,Vec yy)
868: {
869: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
871: PetscInt nt;
874: VecGetLocalSize(xx,&nt);
875: if (nt != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
877: VecGetLocalSize(yy,&nt);
878: if (nt != A->rmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
880: VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
881: /* do diagonal part */
882: (*a->A->ops->mult)(a->A,xx,yy);
883: /* do superdiagonal part */
884: VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
885: (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
886: /* do subdiagonal part */
887: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
888: VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
889: VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
891: return(0);
892: }
896: PetscErrorCode MatMultAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
897: {
898: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
900: PetscInt mbs=a->mbs,bs=A->rmap->bs;
901: PetscScalar *x,*from,zero=0.0;
902:
904: /*
905: PetscSynchronizedPrintf(((PetscObject)A)->comm," MatMultAdd is called ...\n");
906: PetscSynchronizedFlush(((PetscObject)A)->comm);
907: */
908: /* diagonal part */
909: (*a->A->ops->multadd)(a->A,xx,yy,a->slvec1a);
910: VecSet(a->slvec1b,zero);
912: /* subdiagonal part */
913: (*a->B->ops->multtranspose)(a->B,xx,a->slvec0b);
915: /* copy x into the vec slvec0 */
916: VecGetArray(a->slvec0,&from);
917: VecGetArray(xx,&x);
918: PetscMemcpy(from,x,bs*mbs*sizeof(MatScalar));
919: VecRestoreArray(a->slvec0,&from);
920:
921: VecScatterBegin(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
922: VecRestoreArray(xx,&x);
923: VecScatterEnd(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
924:
925: /* superdiagonal part */
926: (*a->B->ops->multadd)(a->B,a->slvec1b,a->slvec1a,zz);
927:
928: return(0);
929: }
933: PetscErrorCode MatMultAdd_MPISBAIJ_2comm(Mat A,Vec xx,Vec yy,Vec zz)
934: {
935: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
939: VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
940: /* do diagonal part */
941: (*a->A->ops->multadd)(a->A,xx,yy,zz);
942: /* do superdiagonal part */
943: VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
944: (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);
946: /* do subdiagonal part */
947: (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
948: VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
949: VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
951: return(0);
952: }
954: /*
955: This only works correctly for square matrices where the subblock A->A is the
956: diagonal block
957: */
960: PetscErrorCode MatGetDiagonal_MPISBAIJ(Mat A,Vec v)
961: {
962: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
966: /* if (a->rmap->N != a->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block"); */
967: MatGetDiagonal(a->A,v);
968: return(0);
969: }
973: PetscErrorCode MatScale_MPISBAIJ(Mat A,PetscScalar aa)
974: {
975: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
979: MatScale(a->A,aa);
980: MatScale(a->B,aa);
981: return(0);
982: }
986: PetscErrorCode MatGetRow_MPISBAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
987: {
988: Mat_MPISBAIJ *mat = (Mat_MPISBAIJ*)matin->data;
989: PetscScalar *vworkA,*vworkB,**pvA,**pvB,*v_p;
991: PetscInt bs = matin->rmap->bs,bs2 = mat->bs2,i,*cworkA,*cworkB,**pcA,**pcB;
992: PetscInt nztot,nzA,nzB,lrow,brstart = matin->rmap->rstart,brend = matin->rmap->rend;
993: PetscInt *cmap,*idx_p,cstart = mat->rstartbs;
996: if (mat->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Already active");
997: mat->getrowactive = PETSC_TRUE;
999: if (!mat->rowvalues && (idx || v)) {
1000: /*
1001: allocate enough space to hold information from the longest row.
1002: */
1003: Mat_SeqSBAIJ *Aa = (Mat_SeqSBAIJ*)mat->A->data;
1004: Mat_SeqBAIJ *Ba = (Mat_SeqBAIJ*)mat->B->data;
1005: PetscInt max = 1,mbs = mat->mbs,tmp;
1006: for (i=0; i<mbs; i++) {
1007: tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i]; /* row length */
1008: if (max < tmp) { max = tmp; }
1009: }
1010: PetscMalloc2(max*bs2,PetscScalar,&mat->rowvalues,max*bs2,PetscInt,&mat->rowindices);
1011: }
1012:
1013: if (row < brstart || row >= brend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local rows");
1014: lrow = row - brstart; /* local row index */
1016: pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1017: if (!v) {pvA = 0; pvB = 0;}
1018: if (!idx) {pcA = 0; if (!v) pcB = 0;}
1019: (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1020: (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1021: nztot = nzA + nzB;
1023: cmap = mat->garray;
1024: if (v || idx) {
1025: if (nztot) {
1026: /* Sort by increasing column numbers, assuming A and B already sorted */
1027: PetscInt imark = -1;
1028: if (v) {
1029: *v = v_p = mat->rowvalues;
1030: for (i=0; i<nzB; i++) {
1031: if (cmap[cworkB[i]/bs] < cstart) v_p[i] = vworkB[i];
1032: else break;
1033: }
1034: imark = i;
1035: for (i=0; i<nzA; i++) v_p[imark+i] = vworkA[i];
1036: for (i=imark; i<nzB; i++) v_p[nzA+i] = vworkB[i];
1037: }
1038: if (idx) {
1039: *idx = idx_p = mat->rowindices;
1040: if (imark > -1) {
1041: for (i=0; i<imark; i++) {
1042: idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1043: }
1044: } else {
1045: for (i=0; i<nzB; i++) {
1046: if (cmap[cworkB[i]/bs] < cstart)
1047: idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1048: else break;
1049: }
1050: imark = i;
1051: }
1052: for (i=0; i<nzA; i++) idx_p[imark+i] = cstart*bs + cworkA[i];
1053: for (i=imark; i<nzB; i++) idx_p[nzA+i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1054: }
1055: } else {
1056: if (idx) *idx = 0;
1057: if (v) *v = 0;
1058: }
1059: }
1060: *nz = nztot;
1061: (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1062: (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1063: return(0);
1064: }
1068: PetscErrorCode MatRestoreRow_MPISBAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1069: {
1070: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1073: if (!baij->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1074: baij->getrowactive = PETSC_FALSE;
1075: return(0);
1076: }
1080: PetscErrorCode MatGetRowUpperTriangular_MPISBAIJ(Mat A)
1081: {
1082: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1083: Mat_SeqSBAIJ *aA = (Mat_SeqSBAIJ*)a->A->data;
1086: aA->getrow_utriangular = PETSC_TRUE;
1087: return(0);
1088: }
1091: PetscErrorCode MatRestoreRowUpperTriangular_MPISBAIJ(Mat A)
1092: {
1093: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1094: Mat_SeqSBAIJ *aA = (Mat_SeqSBAIJ*)a->A->data;
1097: aA->getrow_utriangular = PETSC_FALSE;
1098: return(0);
1099: }
1103: PetscErrorCode MatRealPart_MPISBAIJ(Mat A)
1104: {
1105: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1109: MatRealPart(a->A);
1110: MatRealPart(a->B);
1111: return(0);
1112: }
1116: PetscErrorCode MatImaginaryPart_MPISBAIJ(Mat A)
1117: {
1118: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1122: MatImaginaryPart(a->A);
1123: MatImaginaryPart(a->B);
1124: return(0);
1125: }
1129: PetscErrorCode MatZeroEntries_MPISBAIJ(Mat A)
1130: {
1131: Mat_MPISBAIJ *l = (Mat_MPISBAIJ*)A->data;
1135: MatZeroEntries(l->A);
1136: MatZeroEntries(l->B);
1137: return(0);
1138: }
1142: PetscErrorCode MatGetInfo_MPISBAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1143: {
1144: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)matin->data;
1145: Mat A = a->A,B = a->B;
1147: PetscReal isend[5],irecv[5];
1150: info->block_size = (PetscReal)matin->rmap->bs;
1151: MatGetInfo(A,MAT_LOCAL,info);
1152: isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1153: isend[3] = info->memory; isend[4] = info->mallocs;
1154: MatGetInfo(B,MAT_LOCAL,info);
1155: isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1156: isend[3] += info->memory; isend[4] += info->mallocs;
1157: if (flag == MAT_LOCAL) {
1158: info->nz_used = isend[0];
1159: info->nz_allocated = isend[1];
1160: info->nz_unneeded = isend[2];
1161: info->memory = isend[3];
1162: info->mallocs = isend[4];
1163: } else if (flag == MAT_GLOBAL_MAX) {
1164: MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_MAX,((PetscObject)matin)->comm);
1165: info->nz_used = irecv[0];
1166: info->nz_allocated = irecv[1];
1167: info->nz_unneeded = irecv[2];
1168: info->memory = irecv[3];
1169: info->mallocs = irecv[4];
1170: } else if (flag == MAT_GLOBAL_SUM) {
1171: MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_SUM,((PetscObject)matin)->comm);
1172: info->nz_used = irecv[0];
1173: info->nz_allocated = irecv[1];
1174: info->nz_unneeded = irecv[2];
1175: info->memory = irecv[3];
1176: info->mallocs = irecv[4];
1177: } else {
1178: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Unknown MatInfoType argument %d",(int)flag);
1179: }
1180: info->fill_ratio_given = 0; /* no parallel LU/ILU/Cholesky */
1181: info->fill_ratio_needed = 0;
1182: info->factor_mallocs = 0;
1183: return(0);
1184: }
1188: PetscErrorCode MatSetOption_MPISBAIJ(Mat A,MatOption op,PetscBool flg)
1189: {
1190: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1191: Mat_SeqSBAIJ *aA = (Mat_SeqSBAIJ*)a->A->data;
1195: switch (op) {
1196: case MAT_NEW_NONZERO_LOCATIONS:
1197: case MAT_NEW_NONZERO_ALLOCATION_ERR:
1198: case MAT_UNUSED_NONZERO_LOCATION_ERR:
1199: case MAT_KEEP_NONZERO_PATTERN:
1200: case MAT_NEW_NONZERO_LOCATION_ERR:
1201: MatSetOption(a->A,op,flg);
1202: MatSetOption(a->B,op,flg);
1203: break;
1204: case MAT_ROW_ORIENTED:
1205: a->roworiented = flg;
1206: MatSetOption(a->A,op,flg);
1207: MatSetOption(a->B,op,flg);
1208: break;
1209: case MAT_NEW_DIAGONALS:
1210: PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1211: break;
1212: case MAT_IGNORE_OFF_PROC_ENTRIES:
1213: a->donotstash = flg;
1214: break;
1215: case MAT_USE_HASH_TABLE:
1216: a->ht_flag = flg;
1217: break;
1218: case MAT_HERMITIAN:
1219: if (!A->assembled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Must call MatAssemblyEnd() first");
1220: MatSetOption(a->A,op,flg);
1221: A->ops->mult = MatMult_MPISBAIJ_Hermitian;
1222: break;
1223: case MAT_SPD:
1224: A->spd_set = PETSC_TRUE;
1225: A->spd = flg;
1226: if (flg) {
1227: A->symmetric = PETSC_TRUE;
1228: A->structurally_symmetric = PETSC_TRUE;
1229: A->symmetric_set = PETSC_TRUE;
1230: A->structurally_symmetric_set = PETSC_TRUE;
1231: }
1232: break;
1233: case MAT_SYMMETRIC:
1234: MatSetOption(a->A,op,flg);
1235: break;
1236: case MAT_STRUCTURALLY_SYMMETRIC:
1237: MatSetOption(a->A,op,flg);
1238: break;
1239: case MAT_SYMMETRY_ETERNAL:
1240: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Matrix must be symmetric");
1241: PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1242: break;
1243: case MAT_IGNORE_LOWER_TRIANGULAR:
1244: aA->ignore_ltriangular = flg;
1245: break;
1246: case MAT_ERROR_LOWER_TRIANGULAR:
1247: aA->ignore_ltriangular = flg;
1248: break;
1249: case MAT_GETROW_UPPERTRIANGULAR:
1250: aA->getrow_utriangular = flg;
1251: break;
1252: default:
1253: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %d",op);
1254: }
1255: return(0);
1256: }
1260: PetscErrorCode MatTranspose_MPISBAIJ(Mat A,MatReuse reuse,Mat *B)
1261: {
1264: if (reuse == MAT_INITIAL_MATRIX || *B != A) {
1265: MatDuplicate(A,MAT_COPY_VALUES,B);
1266: }
1267: return(0);
1268: }
1272: PetscErrorCode MatDiagonalScale_MPISBAIJ(Mat mat,Vec ll,Vec rr)
1273: {
1274: Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1275: Mat a=baij->A, b=baij->B;
1277: PetscInt nv,m,n;
1278: PetscBool flg;
1281: if (ll != rr){
1282: VecEqual(ll,rr,&flg);
1283: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"For symmetric format, left and right scaling vectors must be the same");
1284: }
1285: if (!ll) return(0);
1287: MatGetLocalSize(mat,&m,&n);
1288: if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"For symmetric format, local sizes %D %D must be the same",m,n);
1289:
1290: VecGetLocalSize(rr,&nv);
1291: if (nv!=n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Left and right vector non-conforming local size");
1293: VecScatterBegin(baij->Mvctx,rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1294:
1295: /* left diagonalscale the off-diagonal part */
1296: (*b->ops->diagonalscale)(b,ll,PETSC_NULL);
1297:
1298: /* scale the diagonal part */
1299: (*a->ops->diagonalscale)(a,ll,rr);
1301: /* right diagonalscale the off-diagonal part */
1302: VecScatterEnd(baij->Mvctx,rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1303: (*b->ops->diagonalscale)(b,PETSC_NULL,baij->lvec);
1304: return(0);
1305: }
1309: PetscErrorCode MatSetUnfactored_MPISBAIJ(Mat A)
1310: {
1311: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1315: MatSetUnfactored(a->A);
1316: return(0);
1317: }
1319: static PetscErrorCode MatDuplicate_MPISBAIJ(Mat,MatDuplicateOption,Mat *);
1323: PetscErrorCode MatEqual_MPISBAIJ(Mat A,Mat B,PetscBool *flag)
1324: {
1325: Mat_MPISBAIJ *matB = (Mat_MPISBAIJ*)B->data,*matA = (Mat_MPISBAIJ*)A->data;
1326: Mat a,b,c,d;
1327: PetscBool flg;
1331: a = matA->A; b = matA->B;
1332: c = matB->A; d = matB->B;
1334: MatEqual(a,c,&flg);
1335: if (flg) {
1336: MatEqual(b,d,&flg);
1337: }
1338: MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,((PetscObject)A)->comm);
1339: return(0);
1340: }
1344: PetscErrorCode MatCopy_MPISBAIJ(Mat A,Mat B,MatStructure str)
1345: {
1347: Mat_MPISBAIJ *a = (Mat_MPISBAIJ *)A->data;
1348: Mat_MPISBAIJ *b = (Mat_MPISBAIJ *)B->data;
1351: /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
1352: if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
1353: MatGetRowUpperTriangular(A);
1354: MatCopy_Basic(A,B,str);
1355: MatRestoreRowUpperTriangular(A);
1356: } else {
1357: MatCopy(a->A,b->A,str);
1358: MatCopy(a->B,b->B,str);
1359: }
1360: return(0);
1361: }
1365: PetscErrorCode MatSetUpPreallocation_MPISBAIJ(Mat A)
1366: {
1370: MatMPISBAIJSetPreallocation(A,-PetscMax(A->rmap->bs,1),PETSC_DEFAULT,0,PETSC_DEFAULT,0);
1371: return(0);
1372: }
1376: PetscErrorCode MatAXPY_MPISBAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
1377: {
1379: Mat_MPISBAIJ *xx=(Mat_MPISBAIJ *)X->data,*yy=(Mat_MPISBAIJ *)Y->data;
1380: PetscBLASInt bnz,one=1;
1381: Mat_SeqSBAIJ *xa,*ya;
1382: Mat_SeqBAIJ *xb,*yb;
1385: if (str == SAME_NONZERO_PATTERN) {
1386: PetscScalar alpha = a;
1387: xa = (Mat_SeqSBAIJ *)xx->A->data;
1388: ya = (Mat_SeqSBAIJ *)yy->A->data;
1389: bnz = PetscBLASIntCast(xa->nz);
1390: BLASaxpy_(&bnz,&alpha,xa->a,&one,ya->a,&one);
1391: xb = (Mat_SeqBAIJ *)xx->B->data;
1392: yb = (Mat_SeqBAIJ *)yy->B->data;
1393: bnz = PetscBLASIntCast(xb->nz);
1394: BLASaxpy_(&bnz,&alpha,xb->a,&one,yb->a,&one);
1395: } else {
1396: MatGetRowUpperTriangular(X);
1397: MatAXPY_Basic(Y,a,X,str);
1398: MatRestoreRowUpperTriangular(X);
1399: }
1400: return(0);
1401: }
1405: PetscErrorCode MatSetBlockSize_MPISBAIJ(Mat A,PetscInt bs)
1406: {
1407: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1408: PetscInt rbs,cbs;
1409: PetscErrorCode ierr;
1412: MatSetBlockSize(a->A,bs);
1413: MatSetBlockSize(a->B,bs);
1414: PetscLayoutGetBlockSize(A->rmap,&rbs);
1415: PetscLayoutGetBlockSize(A->cmap,&cbs);
1416: if (rbs != bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Attempt to set block size %d with SBAIJ %d",bs,rbs);
1417: if (cbs != bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Attempt to set block size %d with SBAIJ %d",bs,cbs);
1418: return(0);
1419: }
1423: PetscErrorCode MatGetSubMatrices_MPISBAIJ(Mat A,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *B[])
1424: {
1426: PetscInt i;
1427: PetscBool flg;
1430: for (i=0; i<n; i++) {
1431: ISEqual(irow[i],icol[i],&flg);
1432: if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Can only get symmetric submatrix for MPISBAIJ matrices");
1433: }
1434: MatGetSubMatrices_MPIBAIJ(A,n,irow,icol,scall,B);
1435: return(0);
1436: }
1437:
1439: /* -------------------------------------------------------------------*/
1440: static struct _MatOps MatOps_Values = {
1441: MatSetValues_MPISBAIJ,
1442: MatGetRow_MPISBAIJ,
1443: MatRestoreRow_MPISBAIJ,
1444: MatMult_MPISBAIJ,
1445: /* 4*/ MatMultAdd_MPISBAIJ,
1446: MatMult_MPISBAIJ, /* transpose versions are same as non-transpose */
1447: MatMultAdd_MPISBAIJ,
1448: 0,
1449: 0,
1450: 0,
1451: /*10*/ 0,
1452: 0,
1453: 0,
1454: MatSOR_MPISBAIJ,
1455: MatTranspose_MPISBAIJ,
1456: /*15*/ MatGetInfo_MPISBAIJ,
1457: MatEqual_MPISBAIJ,
1458: MatGetDiagonal_MPISBAIJ,
1459: MatDiagonalScale_MPISBAIJ,
1460: MatNorm_MPISBAIJ,
1461: /*20*/ MatAssemblyBegin_MPISBAIJ,
1462: MatAssemblyEnd_MPISBAIJ,
1463: MatSetOption_MPISBAIJ,
1464: MatZeroEntries_MPISBAIJ,
1465: /*24*/ 0,
1466: 0,
1467: 0,
1468: 0,
1469: 0,
1470: /*29*/ MatSetUpPreallocation_MPISBAIJ,
1471: 0,
1472: 0,
1473: 0,
1474: 0,
1475: /*34*/ MatDuplicate_MPISBAIJ,
1476: 0,
1477: 0,
1478: 0,
1479: 0,
1480: /*39*/ MatAXPY_MPISBAIJ,
1481: MatGetSubMatrices_MPISBAIJ,
1482: MatIncreaseOverlap_MPISBAIJ,
1483: MatGetValues_MPISBAIJ,
1484: MatCopy_MPISBAIJ,
1485: /*44*/ 0,
1486: MatScale_MPISBAIJ,
1487: 0,
1488: 0,
1489: 0,
1490: /*49*/ MatSetBlockSize_MPISBAIJ,
1491: 0,
1492: 0,
1493: 0,
1494: 0,
1495: /*54*/ 0,
1496: 0,
1497: MatSetUnfactored_MPISBAIJ,
1498: 0,
1499: MatSetValuesBlocked_MPISBAIJ,
1500: /*59*/ 0,
1501: 0,
1502: 0,
1503: 0,
1504: 0,
1505: /*64*/ 0,
1506: 0,
1507: 0,
1508: 0,
1509: 0,
1510: /*69*/ MatGetRowMaxAbs_MPISBAIJ,
1511: 0,
1512: 0,
1513: 0,
1514: 0,
1515: /*74*/ 0,
1516: 0,
1517: 0,
1518: 0,
1519: 0,
1520: /*79*/ 0,
1521: 0,
1522: 0,
1523: 0,
1524: MatLoad_MPISBAIJ,
1525: /*84*/ 0,
1526: 0,
1527: 0,
1528: 0,
1529: 0,
1530: /*89*/ 0,
1531: 0,
1532: 0,
1533: 0,
1534: 0,
1535: /*94*/ 0,
1536: 0,
1537: 0,
1538: 0,
1539: 0,
1540: /*99*/ 0,
1541: 0,
1542: 0,
1543: 0,
1544: 0,
1545: /*104*/0,
1546: MatRealPart_MPISBAIJ,
1547: MatImaginaryPart_MPISBAIJ,
1548: MatGetRowUpperTriangular_MPISBAIJ,
1549: MatRestoreRowUpperTriangular_MPISBAIJ,
1550: /*109*/0,
1551: 0,
1552: 0,
1553: 0,
1554: 0,
1555: /*114*/0,
1556: 0,
1557: 0,
1558: 0,
1559: 0,
1560: /*119*/0,
1561: 0,
1562: 0,
1563: 0
1564: };
1570: PetscErrorCode MatGetDiagonalBlock_MPISBAIJ(Mat A,Mat *a)
1571: {
1573: *a = ((Mat_MPISBAIJ *)A->data)->A;
1574: return(0);
1575: }
1581: PetscErrorCode MatMPISBAIJSetPreallocation_MPISBAIJ(Mat B,PetscInt bs,PetscInt d_nz,PetscInt *d_nnz,PetscInt o_nz,PetscInt *o_nnz)
1582: {
1583: Mat_MPISBAIJ *b;
1585: PetscInt i,mbs,Mbs,newbs = PetscAbs(bs);
1588: if (bs < 0){
1589: PetscOptionsBegin(((PetscObject)B)->comm,((PetscObject)B)->prefix,"Options for MPISBAIJ matrix","Mat");
1590: PetscOptionsInt("-mat_block_size","Set the blocksize used to store the matrix","MatMPIBAIJSetPreallocation",newbs,&newbs,PETSC_NULL);
1591: PetscOptionsEnd();
1592: bs = PetscAbs(bs);
1593: }
1594: if ((d_nnz || o_nnz) && newbs != bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Cannot change blocksize from command line if setting d_nnz or o_nnz");
1595: bs = newbs;
1597: if (d_nz == PETSC_DECIDE || d_nz == PETSC_DEFAULT) d_nz = 3;
1598: if (o_nz == PETSC_DECIDE || o_nz == PETSC_DEFAULT) o_nz = 1;
1599: if (d_nz < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than 0: value %D",d_nz);
1600: if (o_nz < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than 0: value %D",o_nz);
1602: B->rmap->bs = B->cmap->bs = bs;
1603: PetscLayoutSetUp(B->rmap);
1604: PetscLayoutSetUp(B->cmap);
1606: if (d_nnz) {
1607: for (i=0; i<B->rmap->n/bs; i++) {
1608: if (d_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than -1: local row %D value %D",i,d_nnz[i]);
1609: }
1610: }
1611: if (o_nnz) {
1612: for (i=0; i<B->rmap->n/bs; i++) {
1613: if (o_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than -1: local row %D value %D",i,o_nnz[i]);
1614: }
1615: }
1617: b = (Mat_MPISBAIJ*)B->data;
1618: mbs = B->rmap->n/bs;
1619: Mbs = B->rmap->N/bs;
1620: if (mbs*bs != B->rmap->n) {
1621: SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Number of local rows %D must be divisible by blocksize %D",B->rmap->n,bs);
1622: }
1624: B->rmap->bs = bs;
1625: b->bs2 = bs*bs;
1626: b->mbs = mbs;
1627: b->nbs = mbs;
1628: b->Mbs = Mbs;
1629: b->Nbs = Mbs;
1631: for (i=0; i<=b->size; i++) {
1632: b->rangebs[i] = B->rmap->range[i]/bs;
1633: }
1634: b->rstartbs = B->rmap->rstart/bs;
1635: b->rendbs = B->rmap->rend/bs;
1636:
1637: b->cstartbs = B->cmap->rstart/bs;
1638: b->cendbs = B->cmap->rend/bs;
1640: if (!B->preallocated) {
1641: MatCreate(PETSC_COMM_SELF,&b->A);
1642: MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);
1643: MatSetType(b->A,MATSEQSBAIJ);
1644: PetscLogObjectParent(B,b->A);
1645: MatCreate(PETSC_COMM_SELF,&b->B);
1646: MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);
1647: MatSetType(b->B,MATSEQBAIJ);
1648: PetscLogObjectParent(B,b->B);
1649: MatStashCreate_Private(((PetscObject)B)->comm,bs,&B->bstash);
1650: }
1652: MatSeqSBAIJSetPreallocation(b->A,bs,d_nz,d_nnz);
1653: MatSeqBAIJSetPreallocation(b->B,bs,o_nz,o_nnz);
1654: B->preallocated = PETSC_TRUE;
1655: return(0);
1656: }
1662: PetscErrorCode MatMPISBAIJSetPreallocationCSR_MPISBAIJ(Mat B,PetscInt bs,const PetscInt ii[],const PetscInt jj[],const PetscScalar V[])
1663: {
1664: PetscInt m,rstart,cstart,cend;
1665: PetscInt i,j,d,nz,nz_max=0,*d_nnz=0,*o_nnz=0;
1666: const PetscInt *JJ=0;
1667: PetscScalar *values=0;
1672: if (bs < 1) SETERRQ1(((PetscObject)B)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Invalid block size specified, must be positive but it is %D",bs);
1673: PetscLayoutSetBlockSize(B->rmap,bs);
1674: PetscLayoutSetBlockSize(B->cmap,bs);
1675: PetscLayoutSetUp(B->rmap);
1676: PetscLayoutSetUp(B->cmap);
1677: m = B->rmap->n/bs;
1678: rstart = B->rmap->rstart/bs;
1679: cstart = B->cmap->rstart/bs;
1680: cend = B->cmap->rend/bs;
1682: if (ii[0]) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"ii[0] must be 0 but it is %D",ii[0]);
1683: PetscMalloc2(m,PetscInt,&d_nnz,m,PetscInt,&o_nnz);
1684: for (i=0; i<m; i++) {
1685: nz = ii[i+1] - ii[i];
1686: if (nz < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative number of columns %D",i,nz);
1687: nz_max = PetscMax(nz_max,nz);
1688: JJ = jj + ii[i];
1689: for (j=0; j<nz; j++) {
1690: if (*JJ >= cstart) break;
1691: JJ++;
1692: }
1693: d = 0;
1694: for (; j<nz; j++) {
1695: if (*JJ++ >= cend) break;
1696: d++;
1697: }
1698: d_nnz[i] = d;
1699: o_nnz[i] = nz - d;
1700: }
1701: MatMPISBAIJSetPreallocation(B,bs,0,d_nnz,0,o_nnz);
1702: PetscFree2(d_nnz,o_nnz);
1704: values = (PetscScalar*)V;
1705: if (!values) {
1706: PetscMalloc(bs*bs*nz_max*sizeof(PetscScalar),&values);
1707: PetscMemzero(values,bs*bs*nz_max*sizeof(PetscScalar));
1708: }
1709: for (i=0; i<m; i++) {
1710: PetscInt row = i + rstart;
1711: PetscInt ncols = ii[i+1] - ii[i];
1712: const PetscInt *icols = jj + ii[i];
1713: const PetscScalar *svals = values + (V ? (bs*bs*ii[i]) : 0);
1714: MatSetValuesBlocked_MPISBAIJ(B,1,&row,ncols,icols,svals,INSERT_VALUES);
1715: }
1717: if (!V) { PetscFree(values); }
1718: MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);
1719: MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);
1721: return(0);
1722: }
1726: #if defined(PETSC_HAVE_MUMPS)
1728: #endif
1729: #if defined(PETSC_HAVE_SPOOLES)
1731: #endif
1732: #if defined(PETSC_HAVE_PASTIX)
1734: #endif
1737: /*MC
1738: MATMPISBAIJ - MATMPISBAIJ = "mpisbaij" - A matrix type to be used for distributed symmetric sparse block matrices,
1739: based on block compressed sparse row format. Only the upper triangular portion of the "diagonal" portion of
1740: the matrix is stored.
1742: For complex numbers by default this matrix is symmetric, NOT Hermitian symmetric. To make it Hermitian symmetric you
1743: can call MatSetOption(Mat, MAT_HERMITIAN);
1745: Options Database Keys:
1746: . -mat_type mpisbaij - sets the matrix type to "mpisbaij" during a call to MatSetFromOptions()
1748: Level: beginner
1750: .seealso: MatCreateMPISBAIJ
1751: M*/
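A hedged fragment (assuming an existing Mat A of type MATMPISBAIJ already filled with values, in a complex-scalar build): per MatSetOption_MPISBAIJ() above, the matrix must be assembled before MAT_HERMITIAN can be set, after which MatMult() dispatches to MatMult_MPISBAIJ_Hermitian().

  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatSetOption(A,MAT_HERMITIAN,PETSC_TRUE);CHKERRQ(ierr);  /* only valid once assembled */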
1760: PetscErrorCode MatCreate_MPISBAIJ(Mat B)
1761: {
1762: Mat_MPISBAIJ *b;
1764: PetscBool flg;
1768: PetscNewLog(B,Mat_MPISBAIJ,&b);
1769: B->data = (void*)b;
1770: PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));
1772: B->ops->destroy = MatDestroy_MPISBAIJ;
1773: B->ops->view = MatView_MPISBAIJ;
1774: B->assembled = PETSC_FALSE;
1776: B->insertmode = NOT_SET_VALUES;
1777: MPI_Comm_rank(((PetscObject)B)->comm,&b->rank);
1778: MPI_Comm_size(((PetscObject)B)->comm,&b->size);
1780: /* build local table of row and column ownerships */
1781: PetscMalloc((b->size+2)*sizeof(PetscInt),&b->rangebs);
1783: /* build cache for off array entries formed */
1784: MatStashCreate_Private(((PetscObject)B)->comm,1,&B->stash);
1785: b->donotstash = PETSC_FALSE;
1786: b->colmap = PETSC_NULL;
1787: b->garray = PETSC_NULL;
1788: b->roworiented = PETSC_TRUE;
1790: /* stuff used in block assembly */
1791: b->barray = 0;
1793: /* stuff used for matrix vector multiply */
1794: b->lvec = 0;
1795: b->Mvctx = 0;
1796: b->slvec0 = 0;
1797: b->slvec0b = 0;
1798: b->slvec1 = 0;
1799: b->slvec1a = 0;
1800: b->slvec1b = 0;
1801: b->sMvctx = 0;
1803: /* stuff for MatGetRow() */
1804: b->rowindices = 0;
1805: b->rowvalues = 0;
1806: b->getrowactive = PETSC_FALSE;
1808: /* hash table stuff */
1809: b->ht = 0;
1810: b->hd = 0;
1811: b->ht_size = 0;
1812: b->ht_flag = PETSC_FALSE;
1813: b->ht_fact = 0;
1814: b->ht_total_ct = 0;
1815: b->ht_insert_ct = 0;
1817: /* stuff for MatGetSubMatrices_MPIBAIJ_local() */
1818: b->ijonly = PETSC_FALSE;
1820: b->in_loc = 0;
1821: b->v_loc = 0;
1822: b->n_loc = 0;
1823: PetscOptionsBegin(((PetscObject)B)->comm,PETSC_NULL,"Options for loading MPISBAIJ matrix 1","Mat");
1824: PetscOptionsBool("-mat_use_hash_table","Use hash table to save memory in constructing matrix","MatSetOption",PETSC_FALSE,&flg,PETSC_NULL);
1825: if (flg) {
1826: PetscReal fact = 1.39;
1827: MatSetOption(B,MAT_USE_HASH_TABLE,PETSC_TRUE);
1828: PetscOptionsReal("-mat_use_hash_table","Use hash table factor","MatMPIBAIJSetHashTableFactor",fact,&fact,PETSC_NULL);
1829: if (fact <= 1.0) fact = 1.39;
1830: MatMPIBAIJSetHashTableFactor(B,fact);
1831: PetscInfo1(B,"Hash table Factor used %5.2f\n",fact);
1832: }
1833: PetscOptionsEnd();
1835: #if defined(PETSC_HAVE_PASTIX)
1836: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_pastix_C",
1837: "MatGetFactor_mpisbaij_pastix",
1838: MatGetFactor_mpisbaij_pastix);
1839: #endif
1840: #if defined(PETSC_HAVE_MUMPS)
1841: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_mumps_C",
1842: "MatGetFactor_sbaij_mumps",
1843: MatGetFactor_sbaij_mumps);
1844: #endif
1845: #if defined(PETSC_HAVE_SPOOLES)
1846: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_spooles_C",
1847: "MatGetFactor_mpisbaij_spooles",
1848: MatGetFactor_mpisbaij_spooles);
1849: #endif
1850: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
1851: "MatStoreValues_MPISBAIJ",
1852: MatStoreValues_MPISBAIJ);
1853: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
1854: "MatRetrieveValues_MPISBAIJ",
1855: MatRetrieveValues_MPISBAIJ);
1856: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
1857: "MatGetDiagonalBlock_MPISBAIJ",
1858: MatGetDiagonalBlock_MPISBAIJ);
1859: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPISBAIJSetPreallocation_C",
1860: "MatMPISBAIJSetPreallocation_MPISBAIJ",
1861: MatMPISBAIJSetPreallocation_MPISBAIJ);
1862: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPISBAIJSetPreallocationCSR_C",
1863: "MatMPISBAIJSetPreallocationCSR_MPISBAIJ",
1864: MatMPISBAIJSetPreallocationCSR_MPISBAIJ);
1865: PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_mpisbaij_mpisbstrm_C",
1866: "MatConvert_MPISBAIJ_MPISBSTRM",
1867: MatConvert_MPISBAIJ_MPISBSTRM);
1869: B->symmetric = PETSC_TRUE;
1870: B->structurally_symmetric = PETSC_TRUE;
1871: B->symmetric_set = PETSC_TRUE;
1872: B->structurally_symmetric_set = PETSC_TRUE;
1873: PetscObjectChangeTypeName((PetscObject)B,MATMPISBAIJ);
1874: return(0);
1875: }
1878: /*MC
1879: MATSBAIJ - MATSBAIJ = "sbaij" - A matrix type to be used for symmetric block sparse matrices.
1881: This matrix type is identical to MATSEQSBAIJ when constructed with a single process communicator,
1882: and MATMPISBAIJ otherwise.
1884: Options Database Keys:
1885: . -mat_type sbaij - sets the matrix type to "sbaij" during a call to MatSetFromOptions()
1887: Level: beginner
1889: .seealso: MatCreateMPISBAIJ,MATSEQSBAIJ,MATMPISBAIJ
1890: M*/
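/*
   A minimal usage sketch (not part of this file; sizes are illustrative): with
   MATSBAIJ the same code yields MATSEQSBAIJ on one process and MATMPISBAIJ in
   parallel, so both preallocation routines are called and the inapplicable one
   is a no-op.

      Mat A;
      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);
      MatSetType(A,MATSBAIJ);                                      // or -mat_type sbaij
      MatSetFromOptions(A);
      MatSeqSBAIJSetPreallocation(A,1,5,PETSC_NULL);               // used on one process
      MatMPISBAIJSetPreallocation(A,1,5,PETSC_NULL,2,PETSC_NULL);  // used in parallel
*/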
1894: /*@C
1895: MatMPISBAIJSetPreallocation - For good matrix assembly performance
1896: the user should preallocate the matrix storage by setting the parameters
1897: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
1898: performance can be increased by more than a factor of 50.
1900: Collective on Mat
1902: Input Parameters:
1903: + A - the matrix
1904:   . bs  - size of block
1905: . d_nz - number of block nonzeros per block row in diagonal portion of local
1906: submatrix (same for all local rows)
1907: .  d_nnz - array containing the number of block nonzeros in the various block rows
1908:            in the upper triangular and diagonal part of the diagonal portion of the local
1909:            submatrix (possibly different for each block row) or PETSC_NULL. You must leave room
1910:            for the diagonal entry even if it is zero.
1911: . o_nz - number of block nonzeros per block row in the off-diagonal portion of local
1912: submatrix (same for all local rows).
1913: -  o_nnz - array containing the number of block nonzeros in the various block rows of the
1914: off-diagonal portion of the local submatrix (possibly different for
1915: each block row) or PETSC_NULL.
1918: Options Database Keys:
1919: +   -mat_no_unroll - uses code that does not unroll the loops in the
1920:                      block calculations (much slower)
1921: -   -mat_block_size - size of the blocks to use
1923: Notes:
1925:    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1926:    then it must be used on all processors that share the object for that argument.
1928:    If the *_nnz parameter is given then the *_nz parameter is ignored.
1930: Storage Information:
1931: For a square global matrix we define each processor's diagonal portion
1932: to be its local rows and the corresponding columns (a square submatrix);
1933: each processor's off-diagonal portion encompasses the remainder of the
1934: local matrix (a rectangular submatrix).
1936: The user can specify preallocated storage for the diagonal part of
1937: the local submatrix with either d_nz or d_nnz (not both). Set
1938: d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1939: memory allocation. Likewise, specify preallocated storage for the
1940: off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
1942:    You can call MatGetInfo() to get information on how effective the preallocation was,
1943:    for example via the fields mallocs, nz_allocated, nz_used, and nz_unneeded.
1944:    You can also run with the option -info and look for messages with the string
1945:    malloc in them to see if additional memory allocation was needed.
1947: Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1948: the figure below we depict these three local rows and all columns (0-11).
1950: .vb
1951: 0 1 2 3 4 5 6 7 8 9 10 11
1952: -------------------
1953: row 3 | o o o d d d o o o o o o
1954: row 4 | o o o d d d o o o o o o
1955: row 5 | o o o d d d o o o o o o
1956: -------------------
1957: .ve
1958:
1959: Thus, any entries in the d locations are stored in the d (diagonal)
1960: submatrix, and any entries in the o locations are stored in the
1961: o (off-diagonal) submatrix. Note that the d matrix is stored in
1962:    MATSEQSBAIJ format and the o submatrix in MATSEQBAIJ format.
1964: Now d_nz should indicate the number of block nonzeros per row in the upper triangular
1965: plus the diagonal part of the d matrix,
1966: and o_nz should indicate the number of block nonzeros per row in the upper triangular
1967: part of the o matrix.
1968: In general, for PDE problems in which most nonzeros are near the diagonal,
1969: one expects d_nz >> o_nz. For large problems you MUST preallocate memory
1970: or you will get TERRIBLE performance; see the users' manual chapter on
1971: matrices.
1973: Level: intermediate
1975: .keywords: matrix, block, aij, compressed row, sparse, parallel
1977: .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1978: @*/
1979: PetscErrorCode MatMPISBAIJSetPreallocation(Mat B,PetscInt bs,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
1980: {
1984: PetscTryMethod(B,"MatMPISBAIJSetPreallocation_C",(Mat,PetscInt,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,bs,d_nz,d_nnz,o_nz,o_nnz));
1985: return(0);
1986: }
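/*
   Example (a sketch, not from this file): exact preallocation for a block
   tridiagonal matrix in which only the upper triangle is stored. B, bs, m,
   rank and size are assumed to be defined; each local block row needs its
   diagonal block plus its right neighbor, and the rightmost local block row
   finds that neighbor on the next process.

      PetscInt i,mbs = m/bs;                   // m = local rows, assumed divisible by bs
      PetscInt *d_nnz,*o_nnz;
      PetscMalloc2(mbs,PetscInt,&d_nnz,mbs,PetscInt,&o_nnz);
      for (i=0; i<mbs; i++) { d_nnz[i] = 2; o_nnz[i] = 0; }
      d_nnz[mbs-1] = 1;                        // last local block row: only its diagonal block is in d
      if (rank != size-1) o_nnz[mbs-1] = 1;    // its right neighbor lives on the next process
      MatMPISBAIJSetPreallocation(B,bs,0,d_nnz,0,o_nnz);
      PetscFree2(d_nnz,o_nnz);
*/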
1990: /*@C
1991: MatCreateMPISBAIJ - Creates a sparse parallel matrix in symmetric block AIJ format
1992: (block compressed row). For good matrix assembly performance
1993: the user should preallocate the matrix storage by setting the parameters
1994: d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
1995: performance can be increased by more than a factor of 50.
1997: Collective on MPI_Comm
1999: Input Parameters:
2000: + comm - MPI communicator
2001: .  bs - size of block
2002: .  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
2003:        This value should be the same as the local size used in creating the
2004:        y vector for the matrix-vector product y = Ax.
2005: .  n - number of local columns (or PETSC_DECIDE to have it calculated if N is given)
2006:        This value should be the same as the local size used in creating the
2007:        x vector for the matrix-vector product y = Ax.
2008: .  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
2009: .  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
2010: . d_nz - number of block nonzeros per block row in diagonal portion of local
2011: submatrix (same for all local rows)
2012: .  d_nnz - array containing the number of block nonzeros in the various block rows
2013:            in the upper triangular and diagonal part of the diagonal portion of the local
2014:            submatrix (possibly different for each block row) or PETSC_NULL.
2015:            You must leave room for the diagonal entry even if it is zero.
2016: . o_nz - number of block nonzeros per block row in the off-diagonal portion of local
2017: submatrix (same for all local rows).
2018: -  o_nnz - array containing the number of block nonzeros in the various block rows of the
2019: off-diagonal portion of the local submatrix (possibly different for
2020: each block row) or PETSC_NULL.
2022: Output Parameter:
2023: . A - the matrix
2025: Options Database Keys:
2026: +   -mat_no_unroll - uses code that does not unroll the loops in the
2027:                      block calculations (much slower)
2028: .   -mat_block_size - size of the blocks to use
2029: -   -mat_mpi - use the parallel matrix data structures even on one processor
2030:                (defaults to using SeqSBAIJ format on one processor)
2032:    It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
2033:    MatXXXXSetPreallocation() paradigm instead of this routine directly.
2034:    [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation()]
2036: Notes:
2037:    The number of rows and columns must be divisible by the blocksize.
2038:    This matrix type does not support complex Hermitian operations.
2040: The user MUST specify either the local or global matrix dimensions
2041: (possibly both).
2043:    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
2044:    then it must be used on all processors that share the object for that argument.
2046:    If the *_nnz parameter is given then the *_nz parameter is ignored.
2048: Storage Information:
2049: For a square global matrix we define each processor's diagonal portion
2050: to be its local rows and the corresponding columns (a square submatrix);
2051: each processor's off-diagonal portion encompasses the remainder of the
2052: local matrix (a rectangular submatrix).
2054: The user can specify preallocated storage for the diagonal part of
2055: the local submatrix with either d_nz or d_nnz (not both). Set
2056: d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
2057: memory allocation. Likewise, specify preallocated storage for the
2058: off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
2060: Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
2061: the figure below we depict these three local rows and all columns (0-11).
2063: .vb
2064: 0 1 2 3 4 5 6 7 8 9 10 11
2065: -------------------
2066: row 3 | o o o d d d o o o o o o
2067: row 4 | o o o d d d o o o o o o
2068: row 5 | o o o d d d o o o o o o
2069: -------------------
2070: .ve
2071:
2072: Thus, any entries in the d locations are stored in the d (diagonal)
2073: submatrix, and any entries in the o locations are stored in the
2074: o (off-diagonal) submatrix. Note that the d matrix is stored in
2075:    MATSEQSBAIJ format and the o submatrix in MATSEQBAIJ format.
2077: Now d_nz should indicate the number of block nonzeros per row in the upper triangular
2078: plus the diagonal part of the d matrix,
2079: and o_nz should indicate the number of block nonzeros per row in the o matrix.
2080: In general, for PDE problems in which most nonzeros are near the diagonal,
2081: one expects d_nz >> o_nz. For large problems you MUST preallocate memory
2082: or you will get TERRIBLE performance; see the users' manual chapter on
2083: matrices.
2085: Level: intermediate
2087: .keywords: matrix, block, aij, compressed row, sparse, parallel
2089: .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
2090: @*/
2092: PetscErrorCode MatCreateMPISBAIJ(MPI_Comm comm,PetscInt bs,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
2093: {
2095: PetscMPIInt size;
2098: MatCreate(comm,A);
2099: MatSetSizes(*A,m,n,M,N);
2100: MPI_Comm_size(comm,&size);
2101: if (size > 1) {
2102: MatSetType(*A,MATMPISBAIJ);
2103: MatMPISBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);
2104: } else {
2105: MatSetType(*A,MATSEQSBAIJ);
2106: MatSeqSBAIJSetPreallocation(*A,bs,d_nz,d_nnz);
2107: }
2108: return(0);
2109: }
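/*
   The recommended creation sequence mentioned in the Notes above, as a sketch
   (comm, the sizes, and the preallocation arguments are placeholders):

      Mat A;
      MatCreate(comm,&A);
      MatSetSizes(A,m,n,M,N);
      MatSetType(A,MATMPISBAIJ);
      MatSetFromOptions(A);
      MatMPISBAIJSetPreallocation(A,bs,d_nz,d_nnz,o_nz,o_nnz);
*/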
2114: static PetscErrorCode MatDuplicate_MPISBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2115: {
2116: Mat mat;
2117: Mat_MPISBAIJ *a,*oldmat = (Mat_MPISBAIJ*)matin->data;
2119: PetscInt len=0,nt,bs=matin->rmap->bs,mbs=oldmat->mbs;
2120: PetscScalar *array;
2123: *newmat = 0;
2124: MatCreate(((PetscObject)matin)->comm,&mat);
2125: MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);
2126: MatSetType(mat,((PetscObject)matin)->type_name);
2127: PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));
2128: PetscLayoutReference(matin->rmap,&mat->rmap);
2129: PetscLayoutReference(matin->cmap,&mat->cmap);
2130:
2131: mat->factortype = matin->factortype;
2132: mat->preallocated = PETSC_TRUE;
2133: mat->assembled = PETSC_TRUE;
2134: mat->insertmode = NOT_SET_VALUES;
2136: a = (Mat_MPISBAIJ*)mat->data;
2137: a->bs2 = oldmat->bs2;
2138: a->mbs = oldmat->mbs;
2139: a->nbs = oldmat->nbs;
2140: a->Mbs = oldmat->Mbs;
2141: a->Nbs = oldmat->Nbs;
2144: a->size = oldmat->size;
2145: a->rank = oldmat->rank;
2146: a->donotstash = oldmat->donotstash;
2147: a->roworiented = oldmat->roworiented;
2148: a->rowindices = 0;
2149: a->rowvalues = 0;
2150: a->getrowactive = PETSC_FALSE;
2151: a->barray = 0;
2152: a->rstartbs = oldmat->rstartbs;
2153: a->rendbs = oldmat->rendbs;
2154: a->cstartbs = oldmat->cstartbs;
2155: a->cendbs = oldmat->cendbs;
2157: /* hash table stuff */
2158: a->ht = 0;
2159: a->hd = 0;
2160: a->ht_size = 0;
2161: a->ht_flag = oldmat->ht_flag;
2162: a->ht_fact = oldmat->ht_fact;
2163: a->ht_total_ct = 0;
2164: a->ht_insert_ct = 0;
2165:
2166: PetscMemcpy(a->rangebs,oldmat->rangebs,(a->size+2)*sizeof(PetscInt));
2167: if (oldmat->colmap) {
2168: #if defined (PETSC_USE_CTABLE)
2169: PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2170: #else
2171: PetscMalloc((a->Nbs)*sizeof(PetscInt),&a->colmap);
2172: PetscLogObjectMemory(mat,(a->Nbs)*sizeof(PetscInt));
2173: PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(PetscInt));
2174: #endif
2175: } else a->colmap = 0;
2177: if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
2178: PetscMalloc(len*sizeof(PetscInt),&a->garray);
2179: PetscLogObjectMemory(mat,len*sizeof(PetscInt));
2180: PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt));
2181: } else a->garray = 0;
2182:
2183: MatStashCreate_Private(((PetscObject)matin)->comm,matin->rmap->bs,&mat->bstash);
2184: VecDuplicate(oldmat->lvec,&a->lvec);
2185: PetscLogObjectParent(mat,a->lvec);
2186: VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
2187: PetscLogObjectParent(mat,a->Mvctx);
2189: VecDuplicate(oldmat->slvec0,&a->slvec0);
2190: PetscLogObjectParent(mat,a->slvec0);
2191: VecDuplicate(oldmat->slvec1,&a->slvec1);
2192: PetscLogObjectParent(mat,a->slvec1);
2194: VecGetLocalSize(a->slvec1,&nt);
2195: VecGetArray(a->slvec1,&array);
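        /* slvec1a and slvec1b are sequential views sharing slvec1's storage: the first bs*mbs entries (owned part) and the trailing entries (ghost part) */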
2196: VecCreateSeqWithArray(PETSC_COMM_SELF,bs*mbs,array,&a->slvec1a);
2197: VecCreateSeqWithArray(PETSC_COMM_SELF,nt-bs*mbs,array+bs*mbs,&a->slvec1b);
2198: VecRestoreArray(a->slvec1,&array);
2199: VecGetArray(a->slvec0,&array);
2200: VecCreateSeqWithArray(PETSC_COMM_SELF,nt-bs*mbs,array+bs*mbs,&a->slvec0b);
2201: VecRestoreArray(a->slvec0,&array);
2202: PetscLogObjectParent(mat,a->slvec0);
2203: PetscLogObjectParent(mat,a->slvec1);
2204: PetscLogObjectParent(mat,a->slvec0b);
2205: PetscLogObjectParent(mat,a->slvec1a);
2206: PetscLogObjectParent(mat,a->slvec1b);
2208: /* VecScatterCopy(oldmat->sMvctx,&a->sMvctx); - not written yet, replaced by the lazy trick: */
2209: PetscObjectReference((PetscObject)oldmat->sMvctx);
2210: a->sMvctx = oldmat->sMvctx;
2211: PetscLogObjectParent(mat,a->sMvctx);
2213: MatDuplicate(oldmat->A,cpvalues,&a->A);
2214: PetscLogObjectParent(mat,a->A);
2215: MatDuplicate(oldmat->B,cpvalues,&a->B);
2216: PetscLogObjectParent(mat,a->B);
2217: PetscFListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);
2218: *newmat = mat;
2219: return(0);
2220: }
2224: PetscErrorCode MatLoad_MPISBAIJ(Mat newmat,PetscViewer viewer)
2225: {
2227: PetscInt i,nz,j,rstart,rend;
2228: PetscScalar *vals,*buf;
2229: MPI_Comm comm = ((PetscObject)viewer)->comm;
2230: MPI_Status status;
2231: PetscMPIInt rank,size,tag = ((PetscObject)viewer)->tag,*sndcounts = 0,*browners,maxnz,*rowners,*locrowlens,mmbs;
2232: PetscInt header[4],*rowlengths = 0,M,N,m,*cols;
2233: PetscInt *procsnz = 0,jj,*mycols,*ibuf;
2234: PetscInt bs=1,Mbs,mbs,extra_rows;
2235: PetscInt *dlens,*odlens,*mask,*masked1,*masked2,rowcount,odcount;
2236: PetscInt dcount,kmax,k,nzcount,tmp,sizesset=1,grows,gcols;
2237: int fd;
2238:
2240: PetscOptionsBegin(comm,PETSC_NULL,"Options for loading MPISBAIJ matrix 2","Mat");
2241: PetscOptionsInt("-matload_block_size","Set the blocksize used to store the matrix","MatLoad",bs,&bs,PETSC_NULL);
2242: PetscOptionsEnd();
2244: MPI_Comm_size(comm,&size);
2245: MPI_Comm_rank(comm,&rank);
2246: if (!rank) {
2247: PetscViewerBinaryGetDescriptor(viewer,&fd);
2248: PetscBinaryRead(fd,(char *)header,4,PETSC_INT);
2249: if (header[0] != MAT_FILE_CLASSID) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
2250: if (header[3] < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format, cannot load as MPISBAIJ");
2251: }
2253: if (newmat->rmap->n < 0 && newmat->rmap->N < 0 && newmat->cmap->n < 0 && newmat->cmap->N < 0) sizesset = 0;
2255: MPI_Bcast(header+1,3,MPIU_INT,0,comm);
2256: M = header[1]; N = header[2];
2258: /* If global rows/cols are set to PETSC_DECIDE, set it to the sizes given in the file */
2259: if (sizesset && newmat->rmap->N < 0) newmat->rmap->N = M;
2260: if (sizesset && newmat->cmap->N < 0) newmat->cmap->N = N;
2261:
2262: /* If global sizes are set, check if they are consistent with that given in the file */
2263: if (sizesset) {
2264: MatGetSize(newmat,&grows,&gcols);
2265: }
2266: if (sizesset && newmat->rmap->N != grows) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Inconsistent # of rows:Matrix in file has (%d) and input matrix has (%d)",M,grows);
2267: if (sizesset && newmat->cmap->N != gcols) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Inconsistent # of cols:Matrix in file has (%d) and input matrix has (%d)",N,gcols);
2269: if (M != N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Can only do square matrices");
2271: /*
2272: This code adds extra rows to make sure the number of rows is
2273: divisible by the blocksize
2274: */
2275: Mbs = M/bs;
2276: extra_rows = bs - M + bs*(Mbs);
2277: if (extra_rows == bs) extra_rows = 0;
2278: else Mbs++;
2279:   if (extra_rows && !rank) {
2280: PetscInfo(viewer,"Padding loaded matrix to match blocksize\n");
2281: }
2283: /* determine ownership of all rows */
2284: if (newmat->rmap->n < 0) { /* PETSC_DECIDE */
2285: mbs = Mbs/size + ((Mbs % size) > rank);
2286: m = mbs*bs;
2287: } else { /* User Set */
2288: m = newmat->rmap->n;
2289: mbs = m/bs;
2290: }
2291: PetscMalloc2(size+1,PetscMPIInt,&rowners,size+1,PetscMPIInt,&browners);
2292: mmbs = PetscMPIIntCast(mbs);
2293: MPI_Allgather(&mmbs,1,MPI_INT,rowners+1,1,MPI_INT,comm);
2294: rowners[0] = 0;
2295: for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
2296: for (i=0; i<=size; i++) browners[i] = rowners[i]*bs;
2297: rstart = rowners[rank];
2298: rend = rowners[rank+1];
2299:
2300: /* distribute row lengths to all processors */
2301: PetscMalloc((rend-rstart)*bs*sizeof(PetscMPIInt),&locrowlens);
2302: if (!rank) {
2303: PetscMalloc((M+extra_rows)*sizeof(PetscInt),&rowlengths);
2304: PetscBinaryRead(fd,rowlengths,M,PETSC_INT);
2305: for (i=0; i<extra_rows; i++) rowlengths[M+i] = 1;
2306: PetscMalloc(size*sizeof(PetscMPIInt),&sndcounts);
2307: for (i=0; i<size; i++) sndcounts[i] = browners[i+1] - browners[i];
2308: MPI_Scatterv(rowlengths,sndcounts,browners,MPIU_INT,locrowlens,(rend-rstart)*bs,MPIU_INT,0,comm);
2309: PetscFree(sndcounts);
2310: } else {
2311: MPI_Scatterv(0,0,0,MPIU_INT,locrowlens,(rend-rstart)*bs,MPIU_INT,0,comm);
2312: }
2313:
2314: if (!rank) { /* procs[0] */
2315: /* calculate the number of nonzeros on each processor */
2316: PetscMalloc(size*sizeof(PetscInt),&procsnz);
2317: PetscMemzero(procsnz,size*sizeof(PetscInt));
2318: for (i=0; i<size; i++) {
2319: for (j=rowners[i]*bs; j< rowners[i+1]*bs; j++) {
2320: procsnz[i] += rowlengths[j];
2321: }
2322: }
2323: PetscFree(rowlengths);
2324:
2325: /* determine max buffer needed and allocate it */
2326: maxnz = 0;
2327: for (i=0; i<size; i++) {
2328: maxnz = PetscMax(maxnz,procsnz[i]);
2329: }
2330: PetscMalloc(maxnz*sizeof(PetscInt),&cols);
2332: /* read in my part of the matrix column indices */
2333: nz = procsnz[0];
2334: PetscMalloc(nz*sizeof(PetscInt),&ibuf);
2335: mycols = ibuf;
2336: if (size == 1) nz -= extra_rows;
2337: PetscBinaryRead(fd,mycols,nz,PETSC_INT);
2338: if (size == 1) for (i=0; i< extra_rows; i++) { mycols[nz+i] = M+i; }
2340:     /* read in the other processors' parts (except the last) and ship them off */
2341: for (i=1; i<size-1; i++) {
2342: nz = procsnz[i];
2343: PetscBinaryRead(fd,cols,nz,PETSC_INT);
2344: MPI_Send(cols,nz,MPIU_INT,i,tag,comm);
2345: }
2346: /* read in the stuff for the last proc */
2347: if (size != 1) {
2348: nz = procsnz[size-1] - extra_rows; /* the extra rows are not on the disk */
2349: PetscBinaryRead(fd,cols,nz,PETSC_INT);
2350: for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
2351: MPI_Send(cols,nz+extra_rows,MPIU_INT,size-1,tag,comm);
2352: }
2353: PetscFree(cols);
2354: } else { /* procs[i], i>0 */
2355: /* determine buffer space needed for message */
2356: nz = 0;
2357: for (i=0; i<m; i++) {
2358: nz += locrowlens[i];
2359: }
2360: PetscMalloc(nz*sizeof(PetscInt),&ibuf);
2361: mycols = ibuf;
2362:     /* receive message of column indices */
2363: MPI_Recv(mycols,nz,MPIU_INT,0,tag,comm,&status);
2364: MPI_Get_count(&status,MPIU_INT,&maxnz);
2365: if (maxnz != nz) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2366: }
2368: /* loop over local rows, determining number of off diagonal entries */
2369: PetscMalloc2(rend-rstart,PetscInt,&dlens,rend-rstart,PetscInt,&odlens);
2370: PetscMalloc3(Mbs,PetscInt,&mask,Mbs,PetscInt,&masked1,Mbs,PetscInt,&masked2);
2371: PetscMemzero(mask,Mbs*sizeof(PetscInt));
2372: PetscMemzero(masked1,Mbs*sizeof(PetscInt));
2373: PetscMemzero(masked2,Mbs*sizeof(PetscInt));
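  /* mask[] flags block columns already counted in the current block row; masked1[]/masked2[] record which flags to clear (diagonal vs off-diagonal portion) */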
2374: rowcount = 0;
2375: nzcount = 0;
2376: for (i=0; i<mbs; i++) {
2377: dcount = 0;
2378: odcount = 0;
2379: for (j=0; j<bs; j++) {
2380: kmax = locrowlens[rowcount];
2381: for (k=0; k<kmax; k++) {
2382: tmp = mycols[nzcount++]/bs; /* block col. index */
2383: if (!mask[tmp]) {
2384: mask[tmp] = 1;
2385: if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp; /* entry in off-diag portion */
2386: else masked1[dcount++] = tmp; /* entry in diag portion */
2387: }
2388: }
2389: rowcount++;
2390: }
2391:
2392: dlens[i] = dcount; /* d_nzz[i] */
2393: odlens[i] = odcount; /* o_nzz[i] */
2395: /* zero out the mask elements we set */
2396: for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
2397: for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
2398: }
2399: if (!sizesset) {
2400: MatSetSizes(newmat,m,m,M+extra_rows,N+extra_rows);
2401: }
2402: MatSetOption(newmat,MAT_IGNORE_LOWER_TRIANGULAR,PETSC_TRUE);
2403: MatMPISBAIJSetPreallocation(newmat,bs,0,dlens,0,odlens);
2404:
2405: if (!rank) {
2406: PetscMalloc(maxnz*sizeof(PetscScalar),&buf);
2407: /* read in my part of the matrix numerical values */
2408: nz = procsnz[0];
2409: vals = buf;
2410: mycols = ibuf;
2411: if (size == 1) nz -= extra_rows;
2412: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2413: if (size == 1) for (i=0; i< extra_rows; i++) { vals[nz+i] = 1.0; }
2415: /* insert into matrix */
2416: jj = rstart*bs;
2417: for (i=0; i<m; i++) {
2418: MatSetValues(newmat,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
2419: mycols += locrowlens[i];
2420: vals += locrowlens[i];
2421: jj++;
2422: }
2424: /* read in other processors (except the last one) and ship out */
2425: for (i=1; i<size-1; i++) {
2426: nz = procsnz[i];
2427: vals = buf;
2428: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2429: MPI_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)newmat)->tag,comm);
2430: }
2431: /* the last proc */
2432: if (size != 1){
2433: nz = procsnz[i] - extra_rows;
2434: vals = buf;
2435: PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2436: for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
2437: MPI_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,((PetscObject)newmat)->tag,comm);
2438: }
2439: PetscFree(procsnz);
2441: } else {
2442: /* receive numeric values */
2443: PetscMalloc(nz*sizeof(PetscScalar),&buf);
2445:     /* receive message of values */
2446: vals = buf;
2447: mycols = ibuf;
2448: MPI_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)newmat)->tag,comm,&status);
2449: MPI_Get_count(&status,MPIU_SCALAR,&maxnz);
2450: if (maxnz != nz) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2452: /* insert into matrix */
2453: jj = rstart*bs;
2454: for (i=0; i<m; i++) {
2455: MatSetValues_MPISBAIJ(newmat,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
2456: mycols += locrowlens[i];
2457: vals += locrowlens[i];
2458: jj++;
2459: }
2460: }
2462: PetscFree(locrowlens);
2463: PetscFree(buf);
2464: PetscFree(ibuf);
2465: PetscFree2(rowners,browners);
2466: PetscFree2(dlens,odlens);
2467: PetscFree3(mask,masked1,masked2);
2468: MatAssemblyBegin(newmat,MAT_FINAL_ASSEMBLY);
2469: MatAssemblyEnd(newmat,MAT_FINAL_ASSEMBLY);
2470: return(0);
2471: }
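/*
   Typical use of MatLoad() with this type, as a sketch ("matrix.dat" stands in
   for a PETSc binary file written by MatView(); the block size can be chosen
   at load time with -matload_block_size):

      Mat         A;
      PetscViewer fd;
      PetscViewerBinaryOpen(PETSC_COMM_WORLD,"matrix.dat",FILE_MODE_READ,&fd);
      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetType(A,MATMPISBAIJ);
      MatLoad(A,fd);
      PetscViewerDestroy(&fd);
*/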
2475: /*XXXXX@
2476: MatMPISBAIJSetHashTableFactor - Sets the factor required to compute the size of the HashTable.
2478: Input Parameters:
2479: . mat - the matrix
2480: . fact - factor
2482: Not Collective on Mat, each process can have a different hash factor
2484: Level: advanced
2486: Notes:
2487: This can also be set by the command line option: -mat_use_hash_table fact
2489: .keywords: matrix, hashtable, factor, HT
2491: .seealso: MatSetOption()
2492: @XXXXX*/
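/*
   For example (a sketch): running an application with
      -mat_use_hash_table
   enables the hash table with the default factor 1.39; a larger factor may be
   supplied as the option value, as the Notes above describe.
*/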
2497: PetscErrorCode MatGetRowMaxAbs_MPISBAIJ(Mat A,Vec v,PetscInt idx[])
2498: {
2499: Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
2500: Mat_SeqBAIJ *b = (Mat_SeqBAIJ*)(a->B)->data;
2501: PetscReal atmp;
2502: PetscReal *work,*svalues,*rvalues;
2504: PetscInt i,bs,mbs,*bi,*bj,brow,j,ncols,krow,kcol,col,row,Mbs,bcol;
2505: PetscMPIInt rank,size;
2506: PetscInt *rowners_bs,dest,count,source;
2507: PetscScalar *va;
2508: MatScalar *ba;
2509: MPI_Status stat;
2512: if (idx) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Send email to petsc-maint@mcs.anl.gov");
2513: MatGetRowMaxAbs(a->A,v,PETSC_NULL);
2514: VecGetArray(v,&va);
2516: MPI_Comm_size(((PetscObject)A)->comm,&size);
2517: MPI_Comm_rank(((PetscObject)A)->comm,&rank);
2519: bs = A->rmap->bs;
2520: mbs = a->mbs;
2521: Mbs = a->Mbs;
2522: ba = b->a;
2523: bi = b->i;
2524: bj = b->j;
2526: /* find ownerships */
2527: rowners_bs = A->rmap->range;
2529: /* each proc creates an array to be distributed */
2530: PetscMalloc(bs*Mbs*sizeof(PetscReal),&work);
2531: PetscMemzero(work,bs*Mbs*sizeof(PetscReal));
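  /* because only the upper triangle is stored, each off-diagonal block in B also contributes to a row owned by a later process; work[] accumulates those contributions, which are shipped to their owners below */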
2533: /* row_max for B */
2534: if (rank != size-1){
2535: for (i=0; i<mbs; i++) {
2536: ncols = bi[1] - bi[0]; bi++;
2537: brow = bs*i;
2538: for (j=0; j<ncols; j++){
2539: bcol = bs*(*bj);
2540: for (kcol=0; kcol<bs; kcol++){
2541: col = bcol + kcol; /* local col index */
2542: col += rowners_bs[rank+1]; /* global col index */
2543: for (krow=0; krow<bs; krow++){
2544: atmp = PetscAbsScalar(*ba); ba++;
2545: row = brow + krow; /* local row index */
2546: if (PetscRealPart(va[row]) < atmp) va[row] = atmp;
2547: if (work[col] < atmp) work[col] = atmp;
2548: }
2549: }
2550: bj++;
2551: }
2552: }
2554: /* send values to its owners */
2555: for (dest=rank+1; dest<size; dest++){
2556: svalues = work + rowners_bs[dest];
2557: count = rowners_bs[dest+1]-rowners_bs[dest];
2558: MPI_Send(svalues,count,MPIU_REAL,dest,rank,((PetscObject)A)->comm);
2559: }
2560: }
2561:
2562: /* receive values */
2563: if (rank){
2564: rvalues = work;
2565: count = rowners_bs[rank+1]-rowners_bs[rank];
2566: for (source=0; source<rank; source++){
2567: MPI_Recv(rvalues,count,MPIU_REAL,MPI_ANY_SOURCE,MPI_ANY_TAG,((PetscObject)A)->comm,&stat);
2568: /* process values */
2569: for (i=0; i<count; i++){
2570: if (PetscRealPart(va[i]) < rvalues[i]) va[i] = rvalues[i];
2571: }
2572: }
2573: }
2575: VecRestoreArray(v,&va);
2576: PetscFree(work);
2577: return(0);
2578: }
2582: PetscErrorCode MatSOR_MPISBAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
2583: {
2584: Mat_MPISBAIJ *mat = (Mat_MPISBAIJ*)matin->data;
2585: PetscErrorCode ierr;
2586: PetscInt mbs=mat->mbs,bs=matin->rmap->bs;
2587: PetscScalar *x,*ptr,*from;
2588: Vec bb1;
2589: const PetscScalar *b;
2590:
2592: if (its <= 0 || lits <= 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D and local its %D both positive",its,lits);
2593: if (bs > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"SSOR for block size > 1 is not yet implemented");
2595: if (flag == SOR_APPLY_UPPER) {
2596: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
2597: return(0);
2598: }
2600: if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
2601: if ( flag & SOR_ZERO_INITIAL_GUESS ) {
2602: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,lits,xx);
2603: its--;
2604: }
2606: VecDuplicate(bb,&bb1);
2607: while (its--){
2608:
2609: /* lower triangular part: slvec0b = - B^T*xx */
2610: (*mat->B->ops->multtranspose)(mat->B,xx,mat->slvec0b);
2611:
2612: /* copy xx into slvec0a */
2613: VecGetArray(mat->slvec0,&ptr);
2614: VecGetArray(xx,&x);
2615: PetscMemcpy(ptr,x,bs*mbs*sizeof(MatScalar));
2616: VecRestoreArray(mat->slvec0,&ptr);
2618: VecScale(mat->slvec0,-1.0);
2620: /* copy bb into slvec1a */
2621: VecGetArray(mat->slvec1,&ptr);
2622: VecGetArrayRead(bb,&b);
2623: PetscMemcpy(ptr,b,bs*mbs*sizeof(MatScalar));
2624: VecRestoreArray(mat->slvec1,&ptr);
2626: /* set slvec1b = 0 */
2627: VecSet(mat->slvec1b,0.0);
2629: VecScatterBegin(mat->sMvctx,mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD);
2630: VecRestoreArray(xx,&x);
2631: VecRestoreArrayRead(bb,&b);
2632: VecScatterEnd(mat->sMvctx,mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD);
2634: /* upper triangular part: bb1 = bb1 - B*x */
2635: (*mat->B->ops->multadd)(mat->B,mat->slvec1b,mat->slvec1a,bb1);
2636:
2637: /* local diagonal sweep */
2638: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);
2639: }
2640: VecDestroy(&bb1);
2641: } else if ((flag & SOR_LOCAL_FORWARD_SWEEP) && (its == 1) && (flag & SOR_ZERO_INITIAL_GUESS)){
2642: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
2643: } else if ((flag & SOR_LOCAL_BACKWARD_SWEEP) && (its == 1) && (flag & SOR_ZERO_INITIAL_GUESS)){
2644: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);
2645: } else if (flag & SOR_EISENSTAT) {
2646: Vec xx1;
2647: PetscBool hasop;
2648: const PetscScalar *diag;
2649: PetscScalar *sl,scale = (omega - 2.0)/omega;
2650: PetscInt i,n;
2652: if (!mat->xx1) {
2653: VecDuplicate(bb,&mat->xx1);
2654: VecDuplicate(bb,&mat->bb1);
2655: }
2656: xx1 = mat->xx1;
2657: bb1 = mat->bb1;
2659: (*mat->A->ops->sor)(mat->A,bb,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_BACKWARD_SWEEP),fshift,lits,1,xx);
2661: if (!mat->diag) {
2662: /* this is wrong for same matrix with new nonzero values */
2663: MatGetVecs(matin,&mat->diag,PETSC_NULL);
2664: MatGetDiagonal(matin,mat->diag);
2665: }
2666: MatHasOperation(matin,MATOP_MULT_DIAGONAL_BLOCK,&hasop);
2668: if (hasop) {
2669: MatMultDiagonalBlock(matin,xx,bb1);
2670: VecAYPX(mat->slvec1a,scale,bb);
2671: } else {
2672: /*
2673: These two lines are replaced by code that may be a bit faster for a good compiler
2674: VecPointwiseMult(mat->slvec1a,mat->diag,xx);
2675: VecAYPX(mat->slvec1a,scale,bb);
2676: */
2677: VecGetArray(mat->slvec1a,&sl);
2678: VecGetArrayRead(mat->diag,&diag);
2679: VecGetArrayRead(bb,&b);
2680: VecGetArray(xx,&x);
2681: VecGetLocalSize(xx,&n);
2682: if (omega == 1.0) {
2683: for (i=0; i<n; i++) {
2684: sl[i] = b[i] - diag[i]*x[i];
2685: }
2686: PetscLogFlops(2.0*n);
2687: } else {
2688: for (i=0; i<n; i++) {
2689: sl[i] = b[i] + scale*diag[i]*x[i];
2690: }
2691: PetscLogFlops(3.0*n);
2692: }
2693: VecRestoreArray(mat->slvec1a,&sl);
2694: VecRestoreArrayRead(mat->diag,&diag);
2695: VecRestoreArrayRead(bb,&b);
2696: VecRestoreArray(xx,&x);
2697: }
2699: /* multiply off-diagonal portion of matrix */
2700: VecSet(mat->slvec1b,0.0);
2701: (*mat->B->ops->multtranspose)(mat->B,xx,mat->slvec0b);
2702: VecGetArray(mat->slvec0,&from);
2703: VecGetArray(xx,&x);
2704: PetscMemcpy(from,x,bs*mbs*sizeof(MatScalar));
2705: VecRestoreArray(mat->slvec0,&from);
2706: VecRestoreArray(xx,&x);
2707: VecScatterBegin(mat->sMvctx,mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD);
2708: VecScatterEnd(mat->sMvctx,mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD);
2709: (*mat->B->ops->multadd)(mat->B,mat->slvec1b,mat->slvec1a,mat->slvec1a);
2711: /* local sweep */
2712: (*mat->A->ops->sor)(mat->A,mat->slvec1a,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_FORWARD_SWEEP),fshift,lits,1,xx1);
2713: VecAXPY(xx,1.0,xx1);
2714: } else {
2715: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatSORType is not supported for SBAIJ matrix format");
2716: }
2717: return(0);
2718: }
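/*
   This kernel is normally reached through the SOR preconditioner rather than
   called directly, e.g. (a sketch of standard PCSOR options; note from above
   that only bs = 1 is supported):
      -pc_type sor -pc_sor_local_symmetric
*/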
2722: PetscErrorCode MatSOR_MPISBAIJ_2comm(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
2723: {
2724: Mat_MPISBAIJ *mat = (Mat_MPISBAIJ*)matin->data;
2726: Vec lvec1,bb1;
2727:
2729: if (its <= 0 || lits <= 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D and local its %D both positive",its,lits);
2730: if (matin->rmap->bs > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"SSOR for block size > 1 is not yet implemented");
2732: if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
2733: if ( flag & SOR_ZERO_INITIAL_GUESS ) {
2734: (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,lits,xx);
2735: its--;
2736: }
2738: VecDuplicate(mat->lvec,&lvec1);
2739: VecDuplicate(bb,&bb1);
2740: while (its--){
2741: VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2742:
2743: /* lower diagonal part: bb1 = bb - B^T*xx */
2744: (*mat->B->ops->multtranspose)(mat->B,xx,lvec1);
2745: VecScale(lvec1,-1.0);
2747: VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2748: VecCopy(bb,bb1);
2749: VecScatterBegin(mat->Mvctx,lvec1,bb1,ADD_VALUES,SCATTER_REVERSE);
2751: /* upper diagonal part: bb1 = bb1 - B*x */
2752: VecScale(mat->lvec,-1.0);
2753: (*mat->B->ops->multadd)(mat->B,mat->lvec,bb1,bb1);
2755: VecScatterEnd(mat->Mvctx,lvec1,bb1,ADD_VALUES,SCATTER_REVERSE);
2756:
2757: /* diagonal sweep */
2758: (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);
2759: }
2760: VecDestroy(&lvec1);
2761: VecDestroy(&bb1);
2762: } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatSORType is not supported for SBAIJ matrix format");
2763: return(0);
2764: }
2768: /*@
2769:    MatCreateMPISBAIJWithArrays - creates an MPI SBAIJ matrix using arrays that contain the local
2770:      rows in standard CSR format.
2772: Collective on MPI_Comm
2774: Input Parameters:
2775: + comm - MPI communicator
2776: . bs - the block size, only a block size of 1 is supported
2777: . m - number of local rows (Cannot be PETSC_DECIDE)
2778: .  n - number of local columns (or PETSC_DECIDE to have it calculated if N is given).
2779:        This value should be the same as the local size used in creating the
2780:        x vector for the matrix-vector product y = Ax. For square matrices n is almost always m.
2781: . M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
2782: . N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
2783: . i - row indices
2784: . j - column indices
2785: - a - matrix values
2787: Output Parameter:
2788: . mat - the matrix
2790: Level: intermediate
2792: Notes:
2793: The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
2794: thus you CANNOT change the matrix entries by changing the values of a[] after you have
2795: called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
2797:      The i and j indices are 0-based, and the entries of i are offsets into the local j array.
2799: .keywords: matrix, aij, compressed row, sparse, parallel
2801: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
2802: MPIAIJ, MatCreateMPIAIJ(), MatCreateMPIAIJWithSplitArrays()
2803: @*/
2804: PetscErrorCode MatCreateMPISBAIJWithArrays(MPI_Comm comm,PetscInt bs,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
2805: {
2810: if (i[0]) {
2811: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
2812: }
2813: if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
2814: MatCreate(comm,mat);
2815: MatSetSizes(*mat,m,n,M,N);
2816: MatSetType(*mat,MATMPISBAIJ);
2817: MatMPISBAIJSetPreallocationCSR(*mat,bs,i,j,a);
2818: return(0);
2819: }
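/*
   A hedged example on a single process with bs = 1: the CSR arrays below
   describe the upper triangle of the 2x2 matrix [[2,1],[1,3]]; the lower
   entries are implied by symmetry (lower-triangular input is ignored).

      PetscInt    i[] = {0,2,3};        // row offsets
      PetscInt    j[] = {0,1,1};        // column indices, sorted within each row
      PetscScalar a[] = {2.0,1.0,3.0};
      Mat         A;
      MatCreateMPISBAIJWithArrays(PETSC_COMM_WORLD,1,2,2,PETSC_DETERMINE,PETSC_DETERMINE,i,j,a,&A);
*/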
2824: /*@C
2825:    MatMPISBAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in SBAIJ format
2826:    (symmetric block compressed row).
2828: Collective on MPI_Comm
2830: Input Parameters:
2831: + A - the matrix
2832: . bs - the block size
2833: . i - the indices into j for the start of each local row (starts with zero)
2834: . j - the column indices for each local row (starts with zero) these must be sorted for each row
2835: - v - optional values in the matrix
2837: Level: developer
2839: .keywords: matrix, aij, compressed row, sparse, parallel
2841: .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIBAIJSetPreallocation(), MatCreateMPIAIJ(), MPIAIJ
2842: @*/
2843: PetscErrorCode MatMPISBAIJSetPreallocationCSR(Mat B,PetscInt bs,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
2844: {
2848: PetscTryMethod(B,"MatMPISBAIJSetPreallocationCSR_C",(Mat,PetscInt,const PetscInt[],const PetscInt[],const PetscScalar[]),(B,bs,i,j,v));
2849: return(0);
2850: }
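/*
   As with the other preallocation routines, v may be PETSC_NULL to set only
   the nonzero pattern, e.g. (a sketch reusing CSR arrays i and j like those
   shown above):

      MatSetType(B,MATMPISBAIJ);
      MatMPISBAIJSetPreallocationCSR(B,bs,i,j,PETSC_NULL);
*/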