Actual source code: ml.c

/* 
   Provides an interface to the ML smoothed aggregation preconditioner.
   Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
                                    Jed Brown, see [PETSC #18321, #18449]. 
*/
#include <private/pcimpl.h>   /*I "petscpc.h" I*/
#include <../src/ksp/pc/impls/mg/mgimpl.h>                    /*I "petscpcmg.h" I*/
#include <../src/mat/impls/aij/seq/aij.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>

#include <math.h>

/* HAVE_CONFIG_H flag is required by ML include files */
#if !defined(HAVE_CONFIG_H)
#define HAVE_CONFIG_H
#endif
#include <ml_include.h>

/* The context (data structure) at each grid level */
typedef struct {
  Vec        x,b,r;           /* global vectors */
  Mat        A,P,R;
  KSP        ksp;
} GridCtx;

/* The context used to input the PETSc matrix into ML at the fine grid */
typedef struct {
  Mat          A;      /* PETSc matrix in AIJ format */
  Mat          Aloc;   /* local portion of A to be used by ML */
  Vec          x,y;
  ML_Operator  *mlmat;
  PetscScalar  *pwork; /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context that associates an ML matrix with a PETSc shell matrix */
typedef struct {
  Mat          A;       /* PETSc shell matrix associated with mlmat */
  ML_Operator  *mlmat;  /* ML matrix associated with A */
  Vec          y, work;
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML             *ml_object;
  ML_Aggregate   *agg_object;
  GridCtx        *gridctx;
  FineGridCtx    *PetscMLdata;
  PetscInt       Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization;
  PetscReal      Threshold,DampingFactor,EnergyMinimizationDropTol;
  PetscBool      SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable;
  PetscBool      reuse_interpolation;
  PetscMPIInt    size; /* size of communicator for pc->pmat */
} PC_ML;

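/* Getrow callback that ML uses to read rows of the fine-grid matrix.
   Following ML's getrow convention, it returns 0 when allocated_space is too
   small (so ML can retry with a larger buffer) and 1 on success. */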
static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
{
  PetscErrorCode ierr;
  PetscInt       m,i,j,k=0,row,*aj;
  PetscScalar    *aa;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;

  ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0);
  for (i = 0; i<N_requested_rows; i++) {
    row            = requested_rows[i];
    row_lengths[i] = a->ilen[row];
    if (allocated_space < k+row_lengths[i]) return(0);
    if ((row >= 0) && (row <= (m-1))) { /* && (not ||): with || the bounds check was always true */
      aj = a->j + a->i[row];
      aa = a->a + a->i[row];
      for (j=0; j<row_lengths[i]; j++){
        columns[k]  = aj[j];
        values[k++] = aa[j];
      }
    }
  }
  return(1);
}

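/* Scatter callback for ML: fills in the ghost entries of the local work array p.
   Entries [0,in_length) already hold the local part; the scatter appends the
   off-process values needed by the extended local matrix Aloc in
   [in_length,out_length). */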
static PetscErrorCode PetscML_comm(double p[],void *ML_data)
{
  FineGridCtx    *ml=(FineGridCtx*)ML_data;
  Mat            A=ml->A;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscMPIInt    size;
  PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
  PetscScalar    *array;

  MPI_Comm_size(((PetscObject)A)->comm,&size);
  if (size == 1) return(0);

  VecPlaceArray(ml->y,p);
  VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
  VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
  VecResetArray(ml->y);
  VecGetArray(a->lvec,&array);
  for (i=in_length; i<out_length; i++){
    p[i] = array[i-in_length];
  }
  VecRestoreArray(a->lvec,&array);
  return(0);
}

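/* Matvec callback for ML: computes ap = Aloc * p. In parallel, p is first
   copied into pwork and its ghost entries are filled via PetscML_comm() so
   the extended local matrix can be applied without further communication. */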
static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
{
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
  Mat            A=ml->A, Aloc=ml->Aloc;
  PetscMPIInt    size;
  PetscScalar    *pwork=ml->pwork;
  PetscInt       i;

  MPI_Comm_size(((PetscObject)A)->comm,&size);
  if (size == 1){
    VecPlaceArray(ml->x,p);
  } else {
    for (i=0; i<in_length; i++) pwork[i] = p[i];
    PetscML_comm(pwork,ml);
    VecPlaceArray(ml->x,pwork);
  }
  VecPlaceArray(ml->y,ap);
  MatMult(Aloc,ml->x,ml->y);
  VecResetArray(ml->x);
  VecResetArray(ml->y);
  return(0);
}

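/* MatMult for the MATSHELL wrapper: applies the underlying ML operator
   directly to the raw arrays of x and y. */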
static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
{
  Mat_MLShell      *shell;
  PetscScalar      *xarray,*yarray;
  PetscInt         x_length,y_length;

  MatShellGetContext(A,(void **)&shell);
  VecGetArray(x,&xarray);
  VecGetArray(y,&yarray);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;
  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
  VecRestoreArray(x,&xarray);
  VecRestoreArray(y,&yarray);
  return(0);
}

/* Computes y = w + A * x
   It is possible that w == y, but not x == y
*/
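/* When w == y, A*x cannot be written directly into y without clobbering w,
   so the product goes into a lazily allocated work vector first. */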
static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  Mat_MLShell   *shell;
  PetscScalar   *xarray,*yarray;
  PetscInt      x_length,y_length;

  MatShellGetContext(A, (void **) &shell);
  if (y == w) {
    if (!shell->work) {
      VecDuplicate(y, &shell->work);
    }
    VecGetArray(x,           &xarray);
    VecGetArray(shell->work, &yarray);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
    VecRestoreArray(x,           &xarray);
    VecRestoreArray(shell->work, &yarray);
    VecAXPY(y, 1.0, shell->work);
  } else {
    VecGetArray(x, &xarray);
    VecGetArray(y, &yarray);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
    VecRestoreArray(x, &xarray);
    VecRestoreArray(y, &yarray);
    VecAXPY(y, 1.0, w);
  }
  return(0);
}

/* newtype is ignored because "ml" is not a registered PETSc MatType */
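/* Concatenates the diagonal block (A) and off-diagonal block (B) of an MPIAIJ
   matrix into one sequential AIJ matrix Aloc whose columns [0,an) are the
   local columns and [an,...) are the ghost columns, the layout ML expects. */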
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  Mat_MPIAIJ      *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ      *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt        *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar     *aa=a->a,*ba=b->a,*ca;
  PetscInt        am=A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt        *ci,*cj,ncols;

  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX){
    PetscMalloc((1+am)*sizeof(PetscInt),&ci);
    ci[0] = 0;
    for (i=0; i<am; i++){
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);
    PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);

    k = 0;
    for (i=0; i<am; i++){
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an = mpimat->A->cmap->n+mpimat->B->cmap->n;
    MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);

    /* MatCreateSeqAIJWithArrays flags the matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change the flags so they are freed as necessary. */
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;

    mat->nonew = 0;
  } else if (scall == MAT_REUSE_MATRIX){
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else {
    SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  return(0);
}

static PetscErrorCode MatDestroy_ML(Mat A)
{
  Mat_MLShell    *shell;

  MatShellGetContext(A,(void **)&shell);
  VecDestroy(&shell->y);
  if (shell->work) {VecDestroy(&shell->work);}
  PetscFree(shell);
  MatDestroy_Shell(A);
  PetscObjectChangeTypeName((PetscObject)A,0);
  return(0);
}

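/* Converts an ML operator to a sequential AIJ matrix. Rectangular operators
   (P and R) arrive in CSR format and their arrays are passed in directly;
   square operators (A) arrive in MSR format and are copied entry by entry. */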
static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
  PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = PETSC_NULL,nz_max;
  PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k;
  PetscScalar           *ml_vals=matdata->values,*aa;

  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass array pointers into the SeqAIJ matrix */
    if (reuse){
      Mat_SeqAIJ  *aij= (Mat_SeqAIJ*)(*newmat)->data;
      aij->i = ml_rowptr;
      aij->j = ml_cols;
      aij->a = ml_vals;
    } else {
      /* sort ml_cols and ml_vals */
      PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
      for (i=0; i<m; i++) {
        nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
      }
      aj = ml_cols; aa = ml_vals;
      for (i=0; i<m; i++){
        PetscSortIntWithScalarArray(nnz[i],aj,aa);
        aj += nnz[i]; aa += nnz[i];
      }
      MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);
      PetscFree(nnz);
    }
    return(0);
  }

  /* ML Amat is in MSR format. Copy its data into a SeqAIJ matrix */
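  /* In MSR storage, ml_vals[i] holds the diagonal entry of row i, while
     ml_cols[i] and ml_cols[i+1] bracket the indices of the off-diagonal
     entries of row i, hence the "+ 1" in the row-length counts below. */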
  if (reuse) {
    for (nz_max=0,i=0; i<m; i++) nz_max = PetscMax(nz_max,ml_cols[i+1] - ml_cols[i] + 1);
  } else {
    MatCreate(PETSC_COMM_SELF,newmat);
    MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);
    MatSetType(*newmat,MATSEQAIJ);

    PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
    nz_max = 1;
    for (i=0; i<m; i++) {
      nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
      if (nnz[i] > nz_max) nz_max = nnz[i];
    }
    MatSeqAIJSetPreallocation(*newmat,0,nnz);
  }
  PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);
  for (i=0; i<m; i++) {
    PetscInt ncols;
    k = 0;
    /* diagonal entry */
    aj[k] = i; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
    }
    ncols = ml_cols[i+1] - ml_cols[i] + 1;
    /* sort aj and aa */
    PetscSortIntWithScalarArray(ncols,aj,aa);
    MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);
  }
  MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);

  PetscFree2(aa,aj);
  PetscFree(nnz);
  return(0);
}

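/* Wraps an ML operator in a PETSc MATSHELL so that the parallel P and R
   operators can be applied through ML without copying their data. */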
static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscInt       m,n;
  ML_Comm        *MLcomm;
  Mat_MLShell    *shellctx;

  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;
  if (!m || !n){
    *newmat = PETSC_NULL; /* assign through the pointer; assigning the local newmat would be lost */
    return(0);
  }

  if (reuse){
    MatShellGetContext(*newmat,(void **)&shellctx);
    shellctx->mlmat = mlmat;
    return(0);
  }

  MLcomm = mlmat->comm;
  PetscNew(Mat_MLShell,&shellctx);
  MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);
  MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);
  MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);
  shellctx->A         = *newmat;
  shellctx->mlmat     = mlmat;
  shellctx->work      = PETSC_NULL;
  VecCreate(PETSC_COMM_WORLD,&shellctx->y);
  VecSetSizes(shellctx->y,m,PETSC_DECIDE);
  VecSetFromOptions(shellctx->y);
  (*newmat)->ops->destroy = MatDestroy_ML;
  return(0);
}

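/* Copies a square ML operator (MSR format) into a parallel MPIAIJ matrix,
   remapping ML's local row/column indices to global PETSc indices via
   ML_build_global_numbering(). */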
static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
  PetscInt              *ml_cols=matdata->columns,*aj;
  PetscScalar           *ml_vals=matdata->values,*aa;
  PetscInt              i,j,k,*gordering;
  PetscInt              m=mlmat->outvec_leng,n,nz_max,row;
  Mat                   A;

  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  n = mlmat->invec_leng;
  if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);

  if (reuse) {
    A = *newmat;
    for (nz_max=0,i=0; i<m; i++) nz_max = PetscMax(nz_max,ml_cols[i+1] - ml_cols[i] + 1);
  } else {
    PetscInt *nnzA,*nnzB,*nnz;
    MatCreate(mlmat->comm->USR_comm,&A);
    MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);
    MatSetType(A,MATMPIAIJ);
    PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);

    nz_max = 0;
    for (i=0; i<m; i++){
      nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
      if (nz_max < nnz[i]) nz_max = nnz[i];
      nnzA[i] = 1; /* diagonal entry */
      for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
        if (ml_cols[j] < m) nnzA[i]++;
      }
      nnzB[i] = nnz[i] - nnzA[i];
    }
    MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);
    PetscFree3(nnzA,nnzB,nnz);
  }

  /* insert mat values -- remap row and column indices */
  nz_max++;
  PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);
  /* create global row numbering for the ML_Operator */
  ML_build_global_numbering(mlmat,&gordering,"rows");
  for (i=0; i<m; i++) {
    PetscInt ncols;
    row = gordering[i];
    k = 0;
    /* diagonal entry */
    aj[k] = row; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
    }
    ncols = ml_cols[i+1] - ml_cols[i] + 1;
    MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);
  }
  ML_free(gordering);
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  *newmat = A;

  PetscFree2(aa,aj);
  return(0);
}

/* -----------------------------------------------------------------------------*/
PetscErrorCode PCReset_ML(PC pc)
{
  PC_MG           *mg = (PC_MG*)pc->data;
  PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
  PetscInt        level,fine_level=pc_ml->Nlevels-1;

  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  if (pc_ml->PetscMLdata) {
    PetscFree(pc_ml->PetscMLdata->pwork);
    MatDestroy(&pc_ml->PetscMLdata->Aloc);
    VecDestroy(&pc_ml->PetscMLdata->x);
    VecDestroy(&pc_ml->PetscMLdata->y);
  }
  PetscFree(pc_ml->PetscMLdata);

  if (pc_ml->gridctx) {
    for (level=0; level<fine_level; level++){
      if (pc_ml->gridctx[level].A){MatDestroy(&pc_ml->gridctx[level].A);}
      if (pc_ml->gridctx[level].P){MatDestroy(&pc_ml->gridctx[level].P);}
      if (pc_ml->gridctx[level].R){MatDestroy(&pc_ml->gridctx[level].R);}
      if (pc_ml->gridctx[level].x){VecDestroy(&pc_ml->gridctx[level].x);}
      if (pc_ml->gridctx[level].b){VecDestroy(&pc_ml->gridctx[level].b);}
      if (pc_ml->gridctx[level+1].r){VecDestroy(&pc_ml->gridctx[level+1].r);}
    }
  }
  PetscFree(pc_ml->gridctx);
  return(0);
}
/* -------------------------------------------------------------------------- */
/*
   PCSetUp_ML - Prepares for the use of the ML preconditioner
                    by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/

PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscMPIInt     size;
  FineGridCtx     *PetscMLdata;
  ML              *ml_object;
  ML_Aggregate    *agg_object;
  ML_Operator     *mlmat;
  PetscInt        nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
  Mat             A,Aloc;
  GridCtx         *gridctx;
  PC_MG           *mg = (PC_MG*)pc->data;
  PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
  PetscBool       isSeq, isMPI;
  KSP             smoother;
  PC              subpc;
  PetscInt        mesh_level, old_mesh_level;

  /* Since PCMG tries to use the DM associated with the PC, we must delete it */
  DMDestroy(&pc->dm);

  A = pc->pmat;
  MPI_Comm_size(((PetscObject)A)->comm,&size);

  if (pc->setupcalled) {
    if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
      /*
       Reuse the interpolation instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for
       multiple solves in which the matrix is not changing too quickly.
       */
      ml_object  = pc_ml->ml_object;
      gridctx    = pc_ml->gridctx;
      Nlevels    = pc_ml->Nlevels;
      fine_level = Nlevels - 1;
      gridctx[fine_level].A = A;

      PetscTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);
      PetscTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);
      if (isMPI){
        MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);
      } else if (isSeq) {
        Aloc = A;
        PetscObjectReference((PetscObject)Aloc);
      } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

      MatGetSize(Aloc,&m,&nlocal_allcols);
      PetscMLdata = pc_ml->PetscMLdata;
      MatDestroy(&PetscMLdata->Aloc);
      PetscMLdata->A    = A;
      PetscMLdata->Aloc = Aloc;
      ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
      ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

      mesh_level = ml_object->ML_finest_level;
      while (ml_object->SingleLevel[mesh_level].Rmat->to) {
        old_mesh_level = mesh_level;
        mesh_level = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;

        /* clean and regenerate A */
        mlmat = &(ml_object->Amat[mesh_level]);
        ML_Operator_Clean(mlmat);
        ML_Operator_Init(mlmat,ml_object->comm);
        ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level);
      }

      level = fine_level - 1;
      if (size == 1) { /* convert the regenerated ML A into seqaij format; P and R are reused */
        for (mllevel=1; mllevel<Nlevels; mllevel++){
          mlmat = &(ml_object->Amat[mllevel]);
          MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);
          level--;
        }
      } else { /* convert the regenerated ML A into mpiaij format; the shell P and R are reused */
        for (mllevel=1; mllevel<Nlevels; mllevel++){
          mlmat  = &(ml_object->Amat[mllevel]);
          MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);
          level--;
        }
      }

      for (level=0; level<fine_level; level++) {
        if (level > 0){
          PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);
        }
        KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);
      }
      PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);
      KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);

      PCSetUp_MG(pc);
      return(0);
    } else {
      /* since ML can change the size of vectors/matrices at any level we must destroy everything */
      PCReset_ML(pc);
      PCReset_MG(pc);
    }
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at the fine grid */
  pc_ml->size = size;
  PetscTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);
  PetscTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);
  if (isMPI){
    MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);
  } else if (isSeq) {
    Aloc = A;
    PetscObjectReference((PetscObject)Aloc);
  } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

  /* create and initialize the struct 'PetscMLdata' */
  PetscNewLog(pc,FineGridCtx,&PetscMLdata);
  pc_ml->PetscMLdata = PetscMLdata;
  PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);

  VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);
  VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);
  VecSetType(PetscMLdata->x,VECSEQ);

  VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);
  VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);
  VecSetType(PetscMLdata->y,VECSEQ);
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;

  /* create the ML discretization matrix at the fine grid */
  /* ML requires input of the fine-grid matrix. It determines nlevels. */
  MatGetSize(Aloc,&m,&nlocal_allcols);
  MatGetBlockSize(A,&bs);
  ML_Create(&ml_object,pc_ml->MaxNlevels);
  ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm);
  pc_ml->ml_object = ml_object;
  ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
  ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
  ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

  ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);

  /* aggregation */
  ML_Aggregate_Create(&agg_object);
  pc_ml->agg_object = agg_object;

  ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);
  ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
  /* set options */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
  case 2:
    ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
  case 3:
    ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
  }
  ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
  ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
  if (pc_ml->SpectralNormScheme_Anorm){
    ML_Set_SpectralNormScheme_Anorm(ml_object);
  }
  agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
  agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
  agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
  agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
  agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
  agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;

  if (pc_ml->OldHierarchy) {
    Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  } else {
    Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  }
  if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
  pc_ml->Nlevels = Nlevels;
  fine_level = Nlevels - 1;

  PCMGSetLevels(pc,Nlevels,PETSC_NULL);
  /* set default smoothers (Richardson + SOR, in serial and in parallel alike) */
  for (level=1; level<=fine_level; level++){
    PCMGGetSmoother(pc,level,&smoother);
    KSPSetType(smoother,KSPRICHARDSON);
    KSPGetPC(smoother,&subpc);
    PCSetType(subpc,PCSOR);
  }
  PCSetFromOptions_MG(pc); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */

  PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);
  pc_ml->gridctx = gridctx;

  /* wrap ML matrices by PETSc shell matrices at coarsened grids.
     Level 0 is the finest grid for ML, but the coarsest for PETSc! */
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1){ /* convert ML P, R and A into seqaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);

      mlmat = &(ml_object->Amat[mllevel]);
      MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into mpiaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat  = &(ml_object->Pmat[mllevel]);
      MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);
      mlmat  = &(ml_object->Rmat[mllevel-1]);
      MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);

      mlmat  = &(ml_object->Amat[mllevel]);
      MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  for (level=0; level<fine_level; level++){
    level1 = level + 1;
    VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);
    VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);
    VecSetType(gridctx[level].x,VECMPI);
    PCMGSetX(pc,level,gridctx[level].x);

    VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);
    VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);
    VecSetType(gridctx[level].b,VECMPI);
    PCMGSetRhs(pc,level,gridctx[level].b);

    VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);
    VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);
    VecSetType(gridctx[level1].r,VECMPI);
    PCMGSetR(pc,level1,gridctx[level1].r);

    if (level == 0){
      PCMGGetCoarseSolve(pc,&gridctx[level].ksp);
    } else {
      PCMGGetSmoother(pc,level,&gridctx[level].ksp);
    }
  }
  PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);

  /* create the coarse levels and the interpolation between the levels */
  for (level=0; level<fine_level; level++){
    level1 = level + 1;
    PCMGSetInterpolation(pc,level1,gridctx[level].P);
    PCMGSetRestriction(pc,level1,gridctx[level].R);
    if (level > 0){
      PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);
    }
    KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);
  }
  PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);
  KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);

  /* setupcalled is set to 0 so that MG is set up from scratch */
  pc->setupcalled = 0;
  PCSetUp_MG(pc);
  return(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCDestroy_ML - Destroys the private context for the ML preconditioner
   that was created with PCCreate_ML().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
PetscErrorCode PCDestroy_ML(PC pc)
{
  PC_MG           *mg = (PC_MG*)pc->data;
  PC_ML           *pc_ml= (PC_ML*)mg->innerctx;

  PCReset_ML(pc);
  PetscFree(pc_ml);
  PCDestroy_MG(pc);
  return(0);
}

PetscErrorCode PCSetFromOptions_ML(PC pc)
{
  PetscInt        indx,PrintLevel;
  const char      *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
  PC_MG           *mg = (PC_MG*)pc->data;
  PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
  PetscMPIInt     size;
  MPI_Comm        comm = ((PetscObject)pc)->comm;

  MPI_Comm_size(comm,&size);
  PetscOptionsHead("ML options");
  PrintLevel = 0;
  indx       = 0;
  PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);
  ML_Set_PrintLevel(PrintLevel);
  PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);
  PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);
  PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);
  pc_ml->CoarsenScheme = indx;
  PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);
  PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);
  PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);
  PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);
  PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);
  PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);
  PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,PETSC_NULL);
  /*
    The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
    This is suboptimal because it amounts to calling exit(1), so we check for the most common conditions.

    We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
    combination of options and ML's exit(1) explanations don't help matters.
  */
  if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in the range -1..4");
  if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
  if (pc_ml->EnergyMinimization == 4) {PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");}
  if (pc_ml->EnergyMinimization) {
    PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);
  }
  if (pc_ml->EnergyMinimization == 2) {
    /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
    PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);
  }
  /* energy minimization sometimes breaks if this is turned off; the more classical schemes should be okay without it */
  if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
  PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxiliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);
  /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
  if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
  PetscOptionsBool("-pc_ml_Reusable","Store intermediate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);
  /*
    ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
    ML_Gen_MultiLevelHierarchy_UsingAggregation(), which is a modified copy (!?) of the documented function
    ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
    functionality of the old function, so some users may still want to use it.  Note that many options are ignored in
    this context, but ML doesn't provide a way to find out which ones.
   */
  PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);
  PetscOptionsTail();
  return(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_ML - Creates an ML preconditioner context, PC_ML,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/

/*MC
     PCML - Use algebraic multigrid preconditioning. This preconditioner requires you to provide
       the fine-grid discretization matrix. The coarser-grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in AIJ format
       and the restriction/interpolation operators wrapped as PETSc shell matrices.

   Options Database Keys:
   Multigrid options (inherited)
+  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
.  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade
   ML options:
.  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
.  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
.  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
.  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
.  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
.  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
-  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)

   Level: intermediate

  Concepts: multigrid

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
           PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
           PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
           PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
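/*
   Example usage (a sketch, not from the original source; the executable name
   ./ex2 and the process count are illustrative only):

     mpiexec -n 4 ./ex2 -ksp_type fgmres -pc_type ml \
        -pc_ml_maxNlevels 3 -pc_ml_CoarsenScheme MIS -pc_mg_cycles 1

   Any of the ML and inherited multigrid options listed above can be combined
   this way at run time.
*/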

PetscErrorCode  PCCreate_ML(PC pc)
{
  PC_ML           *pc_ml;
  PC_MG           *mg;

  /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
  PCSetType(pc,PCMG); /* calls PCCreate_MG() and MGCreate_Private() */
  PetscObjectChangeTypeName((PetscObject)pc,PCML);
  /* Since PCMG tries to use the DM associated with the PC, we must delete it */
  DMDestroy(&pc->dm);
  mg = (PC_MG*)pc->data;
  mg->galerkin = 2;             /* Use Galerkin, but it is computed externally */

  /* create a supporting struct and attach it to pc */
  PetscNewLog(pc,PC_ML,&pc_ml);
  mg->innerctx = pc_ml;

  pc_ml->ml_object     = 0;
  pc_ml->agg_object    = 0;
  pc_ml->gridctx       = 0;
  pc_ml->PetscMLdata   = 0;
  pc_ml->Nlevels       = -1;
  pc_ml->MaxNlevels    = 10;
  pc_ml->MaxCoarseSize = 1;
  pc_ml->CoarsenScheme = 1;
  pc_ml->Threshold     = 0.0;
  pc_ml->DampingFactor = 4.0/3.0;
  pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
  pc_ml->size          = 0;

  /* overwrite the pointers of PCMG by the functions of PCML */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->reset          = PCReset_ML;
  pc->ops->destroy        = PCDestroy_ML;
  return(0);
}