Actual source code: sbaijov.c
2: /*
3: Routines to compute overlapping regions of a parallel MPI matrix.
4: Used when finding submatrices that are shared across processors.
5: */
6: #include <../src/mat/impls/sbaij/mpi/mpisbaij.h>
7: #include <petscbt.h>
9: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat,PetscInt,IS*);
10: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat,PetscInt*,PetscInt,PetscInt*,PetscBT*);
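/*
   MatIncreaseOverlap_MPISBAIJ() implements the public MatIncreaseOverlap() interface for
   MATMPISBAIJ matrices.  A minimal usage sketch (the matrix A, the count nlocal and the
   array rows[] are assumed here and are not part of this file):

       IS is[1];
       ISCreateGeneral(PETSC_COMM_SELF,nlocal,rows,PETSC_COPY_VALUES,&is[0]);
       MatIncreaseOverlap(A,1,is,2);        <-- enlarge the index set by 2 levels of overlap
       ISDestroy(&is[0]);                   <-- is[0] now holds the enlarged set; destroy when done

   Note that the routine destroys the input is[] and creates new ones holding the overlap.
*/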
14: PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat C,PetscInt is_max,IS is[],PetscInt ov)
15: {
17: PetscInt i,N=C->cmap->N, bs=C->rmap->bs,M=C->rmap->N,Mbs=M/bs,*nidx,isz,iov;
18: IS *is_new,*is_row;
19: Mat *submats;
20: Mat_MPISBAIJ *c=(Mat_MPISBAIJ*)C->data;
21: Mat_SeqSBAIJ *asub_i;
22: PetscBT table;
23: PetscInt *ai,brow,nz,nis,l,nmax,nstages_local,nstages,max_no,pos;
24: const PetscInt *idx;
25: PetscBool flg;
28: PetscMalloc(is_max*sizeof(IS),&is_new);
29: /* Convert the indices into block format */
30: ISCompressIndicesGeneral(N,C->rmap->n,bs,is_max,is,is_new);
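  /* Example (illustration only): with bs=2 the point indices {4,5,9} compress to the
     block indices {2,4}; ISExpandIndicesGeneral() at the end of this routine maps the
     enlarged block index sets back to point indices, e.g. {2,4} -> {4,5,8,9}. */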
31: if (ov < 0){ SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");}
32:
33: /* ----- previous non-scalable implementation ----- */
34: flg=PETSC_FALSE;
35: PetscOptionsHasName(PETSC_NULL, "-IncreaseOverlap_old", &flg);
36: if (flg){ /* previous non-scalable implementation */
37: PetscPrintf(PETSC_COMM_SELF,"use previous non-scalable implementation...\n");
38: for (i=0; i<ov; ++i) {
39: MatIncreaseOverlap_MPISBAIJ_Once(C,is_max,is_new);
40: }
41: } else { /* scalable implementation using modified BAIJ routines */
43: PetscMalloc((Mbs+1)*sizeof(PetscInt),&nidx);
44: PetscBTCreate(Mbs,table); /* for column search */
46: /* Create is_row */
47: PetscMalloc(is_max*sizeof(IS),&is_row);
48: ISCreateStride(PETSC_COMM_SELF,Mbs,0,1,&is_row[0]);
49: for (i=1; i<is_max; i++) is_row[i] = is_row[0]; /* reuse is_row[0] */
50:
51: /* Allocate memory to hold all the submatrices - Modified from MatGetSubMatrices_MPIBAIJ() */
52: PetscMalloc((is_max+1)*sizeof(Mat),&submats);
54: /* Determine the number of stages in which the submatrices are extracted */
55: nmax = 20*1000000 / (c->Nbs * sizeof(PetscInt));
56: if (!nmax) nmax = 1;
57: nstages_local = is_max/nmax + ((is_max % nmax)?1:0);
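    /* Example (illustration only): with c->Nbs = 1000000 block columns and an 8-byte
       PetscInt, nmax = 20000000/(1000000*8) = 2, so is_max = 5 index sets are handled
       in nstages_local = 3 stages of at most nmax submatrices, keeping the estimated
       work space per stage near 20 MB. */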
58:
59: /* Make sure every processor loops over the same number of stages */
60: MPI_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,((PetscObject)C)->comm);
61:
62: for (iov=0; iov<ov; ++iov) {
63: /* 1) Get submats for column search */
64: for (i=0,pos=0; i<nstages; i++) {
65: if (pos+nmax <= is_max) max_no = nmax;
66: else if (pos == is_max) max_no = 0;
67: else max_no = is_max-pos;
68:
69: c->ijonly = PETSC_TRUE;
70: MatGetSubMatrices_MPIBAIJ_local(C,max_no,is_row+pos,is_new+pos,MAT_INITIAL_MATRIX,submats+pos);
71: pos += max_no;
72: }
73:
74: /* 2) Row search */
75: MatIncreaseOverlap_MPIBAIJ_Once(C,is_max,is_new);
77: /* 3) Column search */
78: for (i=0; i<is_max; i++){
79: asub_i = (Mat_SeqSBAIJ*)submats[i]->data;
80: ai=asub_i->i;
81:
82: /* put is_new obtained from MatIncreaseOverlap_MPIBAIJ() into the table */
83: PetscBTMemzero(Mbs,table);
84:
85: ISGetIndices(is_new[i],&idx);
86: ISGetLocalSize(is_new[i],&nis);
87: for (l=0; l<nis; l++) {
88: PetscBTSet(table,idx[l]);
89: nidx[l] = idx[l];
90: }
91: isz = nis;
93: /* column search: add to the table every block row of submats[i] that has a nonzero in the is_new[i] columns */
94: for (brow=0; brow<Mbs; brow++){
95: nz = ai[brow+1] - ai[brow];
96: if (nz) {
97: if (!PetscBTLookupSet(table,brow)) nidx[isz++] = brow;
98: }
99: }
100: ISRestoreIndices(is_new[i],&idx);
101: ISDestroy(&is_new[i]);
103: /* create updated is_new */
104: ISCreateGeneral(PETSC_COMM_SELF,isz,nidx,PETSC_COPY_VALUES,is_new+i);
105: }
107: /* Free tmp spaces */
108: for (i=0; i<is_max; i++){
109: MatDestroy(&submats[i]);
110: }
111: }
113: PetscBTDestroy(table);
114: PetscFree(submats);
115: ISDestroy(&is_row[0]);
116: PetscFree(is_row);
117: PetscFree(nidx);
119: }
121: for (i=0; i<is_max; i++) {ISDestroy(&is[i]);}
122: ISExpandIndicesGeneral(N,N,bs,is_max,is_new,is);
124: for (i=0; i<is_max; i++) {ISDestroy(&is_new[i]);}
125: PetscFree(is_new);
126: return(0);
127: }
129: typedef enum {MINE,OTHER} WhoseOwner;
130: /* data1, odata1 and odata2 are packed in the format (for communication):
131: data[0] = is_max, no of is
132: data[1] = size of is[0]
133: ...
134: data[is_max] = size of is[is_max-1]
135: data[is_max + 1] = data(is[0])
136: ...
137: data[is_max+1+sum(size of is[k]), k=0,...,i-1] = data(is[i])
138: ...
139: data2 is packed in the format (for creating output is[]):
140: data[0] = is_max, no of is
141: data[1] = size of is[0]
142: ...
143: data[is_max] = size of is[is_max-1]
144: data[is_max + 1] = data(is[0])
145: ...
146: data[is_max + 1 + Mbs*i] = data(is[i])
147: ...
148: */
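/*
   Walking a buffer packed in the first (communication) format above, as a sketch:

       PetscInt nis  = data[0];            number of index sets in the message
       PetscInt *val = data + nis + 1;     entries of is[0]
       for (i=0; i<nis; i++) {
         PetscInt sz = data[1+i];          size of is[i]
         ... val[0] ... val[sz-1] are the block indices of is[i] ...
         val += sz;                        advance to the entries of is[i+1]
       }
*/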
151: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat C,PetscInt is_max,IS is[])
152: {
153: Mat_MPISBAIJ *c = (Mat_MPISBAIJ*)C->data;
155: PetscMPIInt size,rank,tag1,tag2,*len_s,nrqr,nrqs,*id_r1,*len_r1,flag,len;
156: const PetscInt *idx_i;
157: PetscInt idx,isz,col,*n,*data1,**data1_start,*data2,*data2_i,*data,*data_i,
158: Mbs,i,j,k,*odata1,*odata2,
159: proc_id,**odata2_ptr,*ctable=0,*btable,len_max,len_est;
160: PetscInt proc_end=0,*iwork,len_unused,nodata2;
161: PetscInt ois_max; /* max no. of is[] over all processors */
162: char *t_p;
163: MPI_Comm comm;
164: MPI_Request *s_waits1,*s_waits2,r_req;
165: MPI_Status *s_status,r_status;
166: PetscBT *table; /* mark indices of this processor's is[] */
167: PetscBT table_i;
168: PetscBT otable; /* mark indices of other processors' is[] */
169: PetscInt bs=C->rmap->bs,Bn = c->B->cmap->n,Bnbs = Bn/bs,*Bowners;
170: IS garray_local,garray_gl;
173: comm = ((PetscObject)C)->comm;
174: size = c->size;
175: rank = c->rank;
176: Mbs = c->Mbs;
178: PetscObjectGetNewTag((PetscObject)C,&tag1);
179: PetscObjectGetNewTag((PetscObject)C,&tag2);
181: /* create tables used in
182: step 1: table[i] - mark c->garray of proc [i]
183: step 3: table[i] - mark indices of is[i] when whose=MINE
184: table[0] - mark indices of is[] when whose=OTHER */
185: len = PetscMax(is_max, size);
186: PetscMalloc2(len,PetscBT,&table,(Mbs/PETSC_BITS_PER_BYTE+1)*len,char,&t_p);
187: for (i=0; i<len; i++) {
188: table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
189: }
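  /* Each PetscBT occupies Mbs/PETSC_BITS_PER_BYTE+1 bytes, e.g. 126 bytes per table for
     Mbs = 1000 block rows; all len = max(is_max,size) tables share the single buffer t_p. */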
191: MPI_Allreduce(&is_max,&ois_max,1,MPIU_INT,MPI_MAX,comm);
192:
193: /* 1. Send this processor's is[] to other processors */
194: /*---------------------------------------------------*/
195: /* allocate spaces */
196: PetscMalloc(is_max*sizeof(PetscInt),&n);
197: len = 0;
198: for (i=0; i<is_max; i++) {
199: ISGetLocalSize(is[i],&n[i]);
200: len += n[i];
201: }
202: if (!len) {
203: is_max = 0;
204: } else {
205: len += 1 + is_max; /* max length of data1 for one processor */
206: }
208:
209: PetscMalloc((size*len+1)*sizeof(PetscInt),&data1);
210: PetscMalloc(size*sizeof(PetscInt*),&data1_start);
211: for (i=0; i<size; i++) data1_start[i] = data1 + i*len;
213: PetscMalloc4(size,PetscInt,&len_s,size,PetscInt,&btable,size,PetscInt,&iwork,size+1,PetscInt,&Bowners);
215: /* gather c->garray from all processors */
216: ISCreateGeneral(comm,Bnbs,c->garray,PETSC_COPY_VALUES,&garray_local);
217: ISAllGather(garray_local, &garray_gl);
218: ISDestroy(&garray_local);
219: MPI_Allgather(&Bnbs,1,MPIU_INT,Bowners+1,1,MPIU_INT,comm);
220: Bowners[0] = 0;
221: for (i=0; i<size; i++) Bowners[i+1] += Bowners[i];
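  /* Example (illustration only): with Bnbs = 3, 0, 5 on three processes the Allgather
     fills Bowners[1..3] = {3,0,5}; the running sum then gives Bowners = {0,3,3,8}, so
     the off-diagonal columns of process i occupy garray_gl[Bowners[i]..Bowners[i+1]-1]. */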
222:
223: if (is_max){
224: /* lookup table ctable which maps each block row to its owning proc_id */
225: PetscMalloc(Mbs*sizeof(PetscInt),&ctable);
226: for (proc_id=0,j=0; proc_id<size; proc_id++) {
227: for (; j<C->rmap->range[proc_id+1]/bs; j++) {
228: ctable[j] = proc_id;
229: }
230: }
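    /* Example (illustration only): with bs = 2 and point-row ranges {0,6,10} over two
       processes, block rows 0..2 belong to process 0 and block rows 3..4 to process 1,
       i.e. ctable = {0,0,0,1,1}. */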
232: /* hash tables marking c->garray */
233: ISGetIndices(garray_gl,&idx_i);
234: for (i=0; i<size; i++){
235: table_i = table[i];
236: PetscBTMemzero(Mbs,table_i);
237: for (j = Bowners[i]; j<Bowners[i+1]; j++){ /* go through B cols of proc[i]*/
238: PetscBTSet(table_i,idx_i[j]);
239: }
240: }
241: ISRestoreIndices(garray_gl,&idx_i);
242: } /* if (is_max) */
243: ISDestroy(&garray_gl);
245: /* evaluate communication - mesg to who, length, and buffer space */
246: for (i=0; i<size; i++) len_s[i] = 0;
247:
248: /* header of data1 */
249: for (proc_id=0; proc_id<size; proc_id++){
250: iwork[proc_id] = 0;
251: *data1_start[proc_id] = is_max;
252: data1_start[proc_id]++;
253: for (j=0; j<is_max; j++) {
254: if (proc_id == rank){
255: *data1_start[proc_id] = n[j];
256: } else {
257: *data1_start[proc_id] = 0;
258: }
259: data1_start[proc_id]++;
260: }
261: }
262:
263: for (i=0; i<is_max; i++) {
264: ISGetIndices(is[i],&idx_i);
265: for (j=0; j<n[i]; j++){
266: idx = idx_i[j];
267: *data1_start[rank] = idx; data1_start[rank]++; /* for local processing */
268: proc_end = ctable[idx];
269: for (proc_id=0; proc_id<=proc_end; proc_id++){ /* for others to process */
270: if (proc_id == rank ) continue; /* done before this loop */
271: if (proc_id < proc_end && !PetscBTLookup(table[proc_id],idx))
272: continue; /* no need for sending idx to [proc_id] */
273: *data1_start[proc_id] = idx; data1_start[proc_id]++;
274: len_s[proc_id]++;
275: }
276: }
277: /* update header data */
278: for (proc_id=0; proc_id<size; proc_id++){
279: if (proc_id== rank) continue;
280: *(data1 + proc_id*len + 1 + i) = len_s[proc_id] - iwork[proc_id];
281: iwork[proc_id] = len_s[proc_id] ;
282: }
283: ISRestoreIndices(is[i],&idx_i);
284: }
286: nrqs = 0; nrqr = 0;
287: for (i=0; i<size; i++){
288: data1_start[i] = data1 + i*len;
289: if (len_s[i]){
290: nrqs++;
291: len_s[i] += 1 + is_max; /* add no. of header msg */
292: }
293: }
295: for (i=0; i<is_max; i++) {
296: ISDestroy(&is[i]);
297: }
298: PetscFree(n);
299: PetscFree(ctable);
301: /* Determine the number of messages to expect, their lengths, and the sending processors */
302: PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&nrqr);
303: PetscGatherMessageLengths(comm,nrqs,nrqr,len_s,&id_r1,&len_r1);
304:
305: /* Now post the sends */
306: PetscMalloc2(size,MPI_Request,&s_waits1,size,MPI_Request,&s_waits2);
307: k = 0;
308: for (proc_id=0; proc_id<size; proc_id++){ /* send data1 to processor [proc_id] */
309: if (len_s[proc_id]){
310: MPI_Isend(data1_start[proc_id],len_s[proc_id],MPIU_INT,proc_id,tag1,comm,s_waits1+k);
311: k++;
312: }
313: }
315: /* 2. Receive other's is[] and process. Then send back */
316: /*-----------------------------------------------------*/
317: len = 0;
318: for (i=0; i<nrqr; i++){
319: if (len_r1[i] > len)len = len_r1[i];
320: }
321: PetscFree(len_r1);
322: PetscFree(id_r1);
324: for (proc_id=0; proc_id<size; proc_id++)
325: len_s[proc_id] = iwork[proc_id] = 0;
326:
327: PetscMalloc((len+1)*sizeof(PetscInt),&odata1);
328: PetscMalloc(size*sizeof(PetscInt*),&odata2_ptr);
329: PetscBTCreate(Mbs,otable);
331: len_max = ois_max*(Mbs+1); /* max space storing all is[] for each receive */
332: len_est = 2*len_max; /* estimated space of storing is[] for all receiving messages */
333: PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);
334: nodata2 = 0; /* nodata2+1 = number of buffers malloc'd for odata2_ptr[] */
335: odata2_ptr[nodata2] = odata2;
336: len_unused = len_est; /* unused space in the array odata2_ptr[nodata2]-- needs to be >= len_max */
337:
338: k = 0;
339: while (k < nrqr){
340: /* Receive messages */
341: MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status);
342: if (flag){
343: MPI_Get_count(&r_status,MPIU_INT,&len);
344: proc_id = r_status.MPI_SOURCE;
345: MPI_Irecv(odata1,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);
346: MPI_Wait(&r_req,&r_status);
348: /* Process messages */
349: /* make sure there is enough unused space in odata2 array */
350: if (len_unused < len_max){ /* allocate more space for odata2 */
351: PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);
352: odata2_ptr[++nodata2] = odata2;
353: len_unused = len_est;
354: }
356: MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,odata2,&otable);
357: len = 1 + odata2[0];
358: for (i=0; i<odata2[0]; i++){
359: len += odata2[1 + i];
360: }
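      /* Example (illustration only): a reply holding two index sets of sizes 3 and 0 is
         packed as the header {2,3,0} followed by the 3 indices, so len = 1 + 2 + 3 + 0 = 6. */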
362: /* Send messages back */
363: MPI_Isend(odata2,len,MPIU_INT,proc_id,tag2,comm,s_waits2+k);
364: k++;
365: odata2 += len;
366: len_unused -= len;
367: len_s[proc_id] = len; /* length of the message sent back to [proc_id] by this proc */
368: }
369: }
370: PetscFree(odata1);
371: PetscBTDestroy(otable);
373: /* 3. Do local work on this processor's is[] */
374: /*-------------------------------------------*/
375: /* make sure there is enough unused space in odata2(=data) array */
376: len_max = is_max*(Mbs+1); /* max space storing all is[] for this processor */
377: if (len_unused < len_max){ /* allocate more space for odata2 */
378: PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);
379: odata2_ptr[++nodata2] = odata2;
380: len_unused = len_est;
381: }
383: data = odata2;
384: MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE,data,table);
385: PetscFree(data1_start);
387: /* 4. Receive work done on other processors, then merge */
388: /*------------------------------------------------------*/
389: /* get the max length of the reply messages this processor expects to recv */
390: MPI_Allreduce(len_s,iwork,size,MPIU_INT,MPI_MAX,comm);
391: PetscMalloc((iwork[rank]+1)*sizeof(PetscInt),&data2);
392: PetscFree4(len_s,btable,iwork,Bowners);
394: k = 0;
395: while (k < nrqs){
396: /* Receive messages */
397: MPI_Iprobe(MPI_ANY_SOURCE,tag2,comm,&flag,&r_status);
398: if (flag){
399: MPI_Get_count(&r_status,MPIU_INT,&len);
400: proc_id = r_status.MPI_SOURCE;
401: MPI_Irecv(data2,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);
402: MPI_Wait(&r_req,&r_status);
403: if (len > 1+is_max){ /* Add data2 into data */
404: data2_i = data2 + 1 + is_max;
405: for (i=0; i<is_max; i++){
406: table_i = table[i];
407: data_i = data + 1 + is_max + Mbs*i;
408: isz = data[1+i];
409: for (j=0; j<data2[1+i]; j++){
410: col = data2_i[j];
411: if (!PetscBTLookupSet(table_i,col)) {data_i[isz++] = col;}
412: }
413: data[1+i] = isz;
414: if (i < is_max - 1) data2_i += data2[1+i];
415: }
416: }
417: k++;
418: }
419: }
420: PetscFree(data2);
421: PetscFree2(table,t_p);
423: /* wait until the phase 1 sends are complete */
424: PetscMalloc(size*sizeof(MPI_Status),&s_status);
425: if (nrqs) {MPI_Waitall(nrqs,s_waits1,s_status);}
426: PetscFree(data1);
427:
428: /* wait until the phase 2 sends are complete */
429: if (nrqr){MPI_Waitall(nrqr,s_waits2,s_status);}
430: PetscFree2(s_waits1,s_waits2);
431: PetscFree(s_status);
433: /* 5. Create new is[] */
434: /*--------------------*/
435: for (i=0; i<is_max; i++) {
436: data_i = data + 1 + is_max + Mbs*i;
437: ISCreateGeneral(PETSC_COMM_SELF,data[1+i],data_i,PETSC_COPY_VALUES,is+i);
438: }
439: for (k=0; k<=nodata2; k++){
440: PetscFree(odata2_ptr[k]);
441: }
442: PetscFree(odata2_ptr);
444: return(0);
445: }
449: /*
450: MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap, to do
451: the work on the local processor.
453: Inputs:
454: C - MATMPISBAIJ matrix;
455: data - holds is[]. See MatIncreaseOverlap_MPISBAIJ_Once() for the format.
456: whose - whose is[] to be processed,
457: MINE: this processor's is[]
458: OTHER: other processor's is[]
459: Output:
460: nidx - whose = MINE:
461: holds input and newly found indices in the same format as data
462: whose = OTHER:
463: only holds the newly found indices
464: table - table[i]: mark the indices of is[i], i=0,...,is_max-1. Used only when whose=MINE.
465: */
466: /* Would computation be reduced by swapping the loop 'for each is' and 'for each row'? */
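/*
   Because MATSBAIJ stores only the upper triangular blocks, enlarging is[i] needs two
   complementary searches.  Row search: for a block row r in is[i], the stored entries of
   row r supply its neighbors c >= r.  Column search: a block row r NOT in is[i] must still
   be added when it has a stored entry (r,c) with c in is[i], because that entry also
   represents the unstored symmetric block (c,r).  Example (illustration only): if
   is[i] = {7} and block row 2 stores an entry in block column 7, the column search adds
   block row 2 to the enlarged is[i].
*/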
467: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat C,PetscInt *data,PetscInt whose,PetscInt *nidx,PetscBT *table)
468: {
469: Mat_MPISBAIJ *c = (Mat_MPISBAIJ*)C->data;
470: Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ*)(c->A)->data;
471: Mat_SeqBAIJ *b = (Mat_SeqBAIJ*)(c->B)->data;
473: PetscInt row,mbs,Mbs,*nidx_i,col,col_max,isz,isz0,*ai,*aj,*bi,*bj,*garray,rstart,l;
474: PetscInt a_start,a_end,b_start,b_end,i,j,k,is_max,*idx_i,n;
475: PetscBT table0; /* mark the indices of input is[] for look up */
476: PetscBT table_i; /* points to the i-th table. When whose=OTHER, a single table is used for all is[] */
477:
479: Mbs = c->Mbs; mbs = a->mbs;
480: ai = a->i; aj = a->j;
481: bi = b->i; bj = b->j;
482: garray = c->garray;
483: rstart = c->rstartbs;
484: is_max = data[0];
486: PetscBTCreate(Mbs,table0);
487:
488: nidx[0] = is_max;
489: idx_i = data + is_max + 1; /* ptr to input is[0] array */
490: nidx_i = nidx + is_max + 1; /* ptr to output is[0] array */
491: for (i=0; i<is_max; i++) { /* for each is */
492: isz = 0;
493: n = data[1+i]; /* size of input is[i] */
495: /* initialize and set table_i(mark idx and nidx) and table0(only mark idx) */
496: if (whose == MINE){ /* process this processor's is[] */
497: table_i = table[i];
498: nidx_i = nidx + 1+ is_max + Mbs*i;
499: } else { /* process other processor's is[] - only use one temp table */
500: table_i = table[0];
501: }
502: PetscBTMemzero(Mbs,table_i);
503: PetscBTMemzero(Mbs,table0);
504: if (n==0) {
505: nidx[1+i] = 0; /* size of new is[i] */
506: continue;
507: }
509: isz0 = 0; col_max = 0;
510: for (j=0; j<n; j++){
511: col = idx_i[j];
512: if (col >= Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"index col %D >= Mbs %D",col,Mbs);
513: if(!PetscBTLookupSet(table_i,col)) {
514: PetscBTSet(table0,col);
515: if (whose == MINE) {nidx_i[isz0] = col;}
516: if (col_max < col) col_max = col;
517: isz0++;
518: }
519: }
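    /* isz0 is the number of distinct input indices and col_max the largest one: the row
       search below can stop after isz0 input rows have been visited, and the column
       searches can stop as soon as a stored column exceeds col_max. */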
520:
521: if (whose == MINE) {isz = isz0;}
522: k = 0; /* no. of indices from input is[i] that have been examined */
523: for (row=0; row<mbs; row++){
524: a_start = ai[row]; a_end = ai[row+1];
525: b_start = bi[row]; b_end = bi[row+1];
526: if (PetscBTLookup(table0,row+rstart)){ /* row is on input is[i]:
527: do row search: collect all cols in this row */
528: for (l = a_start; l<a_end ; l++){ /* Amat */
529: col = aj[l] + rstart;
530: if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
531: }
532: for (l = b_start; l<b_end ; l++){ /* Bmat */
533: col = garray[bj[l]];
534: if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
535: }
536: k++;
537: if (k >= isz0) break; /* for (row=0; row<mbs; row++) */
538: } else { /* row is not on input is[i]:
539: do col search: add row to nidx_i if a col of this row is in the input is[i] */
540: for (l = a_start; l<a_end ; l++){ /* Amat */
541: col = aj[l] + rstart;
542: if (col > col_max) break;
543: if (PetscBTLookup(table0,col)){
544: if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
545: break; /* for l = start; l<end ; l++) */
546: }
547: }
548: for (l = b_start; l<b_end ; l++){ /* Bmat */
549: col = garray[bj[l]];
550: if (col > col_max) break;
551: if (PetscBTLookup(table0,col)){
552: if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
553: break; /* for l = start; l<end ; l++) */
554: }
555: }
556: }
557: }
558:
559: if (i < is_max - 1){
560: idx_i += n; /* ptr to input is[i+1] array */
561: nidx_i += isz; /* ptr to output is[i+1] array */
562: }
563: nidx[1+i] = isz; /* size of new is[i] */
564: } /* for each is */
565: PetscBTDestroy(table0);
566:
567: return(0);
568: }