Actual source code: pmetis.c

#include <../src/mat/impls/adj/mpi/mpiadj.h>    /*I "petscmat.h" I*/

/*
   The ParMETIS 3.x interfaces used below (ParMETIS_V3_PartKway(),
   ParMETIS_V3_Mesh2Dual()) are declared in this header.
*/
#include <parmetis.h>

/*
      The first 5 elements of this structure are the input control array to Metis
*/
typedef struct {
  int cuts;         /* number of cuts made (output) */
  int foldfactor;
  int parallel;     /* use parallel partitioner for coarse problem */
  int indexing;     /* 0 indicates C indexing, 1 Fortran */
  int printout;     /* indicates if one wishes Metis to print info */
  MPI_Comm comm_pmetis;
} MatPartitioning_Parmetis;

/*
   Uses the ParMETIS parallel matrix partitioner to partition the matrix in parallel
*/
static PetscErrorCode MatPartitioningApply_Parmetis(MatPartitioning part,IS *partitioning)
{
  MatPartitioning_Parmetis *parmetis = (MatPartitioning_Parmetis*)part->data;
  PetscErrorCode           ierr;
  int                      *locals,size,rank;
  int                      *vtxdist,*xadj,*adjncy,itmp = 0;
  int                      wgtflag=0, numflag=0, ncon=1, nparts=part->n, options[3], i,j;
  Mat                      mat = part->adj;
  Mat_MPIAdj               *adj = (Mat_MPIAdj *)mat->data;
  PetscBool                flg;
  float                    *tpwgts,*ubvec;
  PetscInt                 bs = 1,nold;

  MPI_Comm_size(((PetscObject)mat)->comm,&size);

  PetscTypeCompare((PetscObject)mat,MATMPIADJ,&flg);
  if (!flg) {
    /* bs indicates if the converted matrix is "reduced" from the original and hence the
       resulting partition results need to be stretched to match the original matrix */
    nold = mat->rmap->n;
    MatConvert(mat,MATMPIADJ,MAT_INITIAL_MATRIX,&mat);
    bs   = nold/mat->rmap->n;
    adj  = (Mat_MPIAdj *)mat->data;
  }

  vtxdist = mat->rmap->range;
  xadj    = adj->i;
  adjncy  = adj->j;
  MPI_Comm_rank(((PetscObject)part)->comm,&rank);
#if 1
  if ((vtxdist[rank+1] - vtxdist[rank]) < 1) {
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Does not support any processor with %d entries",vtxdist[rank+1] - vtxdist[rank]);
  }
#endif
#if defined(PETSC_USE_DEBUG)
  /* check that matrix has no diagonal entries */
  {
    int rstart;
    MatGetOwnershipRange(mat,&rstart,PETSC_NULL);
    for (i=0; i<mat->rmap->n; i++) {
      for (j=xadj[i]; j<xadj[i+1]; j++) {
        if (adjncy[j] == i+rstart) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Row %d has diagonal entry; Parmetis forbids diagonal entry",i+rstart);
      }
    }
  }
#endif

  PetscMalloc((mat->rmap->n+1)*sizeof(int),&locals);

  if (PetscLogPrintInfo) {itmp = parmetis->printout; parmetis->printout = 127;}
  PetscMalloc(ncon*nparts*sizeof(float),&tpwgts);
  for (i=0; i<ncon; i++) {
    for (j=0; j<nparts; j++) {
      if (part->part_weights) {
        tpwgts[i*nparts+j] = part->part_weights[i*nparts+j];
      } else {
        tpwgts[i*nparts+j] = 1./nparts;
      }
    }
  }
  PetscMalloc(ncon*sizeof(float),&ubvec);
  for (i=0; i<ncon; i++) {
    ubvec[i] = 1.05;
  }
  options[0] = 0;
  /* ParMETIS has no error conditions ??? */
  ParMETIS_V3_PartKway(vtxdist,xadj,adjncy,part->vertex_weights,adj->values,&wgtflag,&numflag,&ncon,&nparts,tpwgts,ubvec,options,&parmetis->cuts,locals,&parmetis->comm_pmetis);
  PetscFree(tpwgts);
  PetscFree(ubvec);
  if (PetscLogPrintInfo) {parmetis->printout = itmp;}

  if (bs > 1) {
    PetscInt *newlocals;
    PetscMalloc(bs*mat->rmap->n*sizeof(PetscInt),&newlocals);
    for (i=0; i<mat->rmap->n; i++) {
      for (j=0; j<bs; j++) {
        newlocals[bs*i + j] = locals[i];
      }
    }
    PetscFree(locals);
    ISCreateGeneral(((PetscObject)part)->comm,bs*mat->rmap->n,newlocals,PETSC_OWN_POINTER,partitioning);
  } else {
    ISCreateGeneral(((PetscObject)part)->comm,mat->rmap->n,locals,PETSC_OWN_POINTER,partitioning);
  }

  if (!flg) {
    MatDestroy(&mat);
  }
  return(0);
}
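/*
   Editorial sketch (not part of the original source): the IS produced by
   MatPartitioningApply() assigns a target rank to each local row.  A common way
   to consume it afterwards, assuming "is" is that IS and "nparts" is the number
   of parts requested, is

      IS       newnumbering;
      PetscInt *count;
      PetscMalloc(nparts*sizeof(PetscInt),&count);
      ISPartitioningCount(is,nparts,count);         -- rows assigned to each part
      ISPartitioningToNumbering(is,&newnumbering);  -- contiguous new global numbering
      ...
      PetscFree(count);
      ISDestroy(&newnumbering);
*/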


PetscErrorCode MatPartitioningView_Parmetis(MatPartitioning part,PetscViewer viewer)
{
  MatPartitioning_Parmetis *parmetis = (MatPartitioning_Parmetis *)part->data;
  int                      rank;
  PetscBool                iascii;

  MPI_Comm_rank(((PetscObject)part)->comm,&rank);
  PetscTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
  if (iascii) {
    if (parmetis->parallel == 2) {
      PetscViewerASCIIPrintf(viewer,"  Using parallel coarse grid partitioner\n");
    } else {
      PetscViewerASCIIPrintf(viewer,"  Using sequential coarse grid partitioner\n");
    }
    PetscViewerASCIIPrintf(viewer,"  Using %d fold factor\n",parmetis->foldfactor);
    PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);
    PetscViewerASCIISynchronizedPrintf(viewer,"  [%d]Number of cuts found %d\n",rank,parmetis->cuts);
    PetscViewerFlush(viewer);
    PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);
  } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Viewer type %s not supported for this Parmetis partitioner",((PetscObject)viewer)->type_name);

  return(0);
}

/*@
     MatPartitioningParmetisSetCoarseSequential - Use the sequential code to
         do the partitioning of the coarse grid.

  Logically Collective on MatPartitioning

  Input Parameter:
.  part - the partitioning context

   Level: advanced

@*/
PetscErrorCode  MatPartitioningParmetisSetCoarseSequential(MatPartitioning part)
{
  MatPartitioning_Parmetis *parmetis = (MatPartitioning_Parmetis *)part->data;

  parmetis->parallel = 1;
  return(0);
}
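/*
   Editorial note (not part of the original source): besides calling
   MatPartitioningParmetisSetCoarseSequential(part) directly, the same behavior
   can be requested through the options database handled below, e.g.

      MatPartitioningSetFromOptions(part);
      ... run with:  -mat_partitioning_parmetis_coarse_sequential
*/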

/*@
  MatPartitioningParmetisGetEdgeCut - Returns the number of edge cuts in the vertex partition.

  Input Parameter:
. part - the partitioning context

  Output Parameter:
. cut - the edge cut

   Level: advanced

@*/
PetscErrorCode  MatPartitioningParmetisGetEdgeCut(MatPartitioning part, PetscInt *cut)
{
  MatPartitioning_Parmetis *parmetis = (MatPartitioning_Parmetis *) part->data;

  *cut = parmetis->cuts;
  return(0);
}
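/*
   Editorial sketch (not part of the original source): the edge cut is only
   meaningful after the partitioner has been applied, e.g.

      IS       is;
      PetscInt cut;
      MatPartitioningApply(part,&is);
      MatPartitioningParmetisGetEdgeCut(part,&cut);
*/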

PetscErrorCode MatPartitioningSetFromOptions_Parmetis(MatPartitioning part)
{
  PetscBool      flag = PETSC_FALSE;

  PetscOptionsHead("Set ParMeTiS partitioning options");
  PetscOptionsBool("-mat_partitioning_parmetis_coarse_sequential","Use sequential coarse partitioner","MatPartitioningParmetisSetCoarseSequential",flag,&flag,PETSC_NULL);
  if (flag) {
    MatPartitioningParmetisSetCoarseSequential(part);
  }
  PetscOptionsTail();
  return(0);
}


PetscErrorCode MatPartitioningDestroy_Parmetis(MatPartitioning part)
{
  MatPartitioning_Parmetis *parmetis = (MatPartitioning_Parmetis *)part->data;

  MPI_Comm_free(&(parmetis->comm_pmetis));
  PetscFree(parmetis);
  return(0);
}


/*MC
   MATPARTITIONINGPARMETIS - Creates a partitioning context via the external package PARMETIS.

   Collective on MPI_Comm

   Input Parameter:
.  part - the partitioning context

   Options Database Keys:
.  -mat_partitioning_parmetis_coarse_sequential - use sequential PARMETIS coarse partitioner

   Level: beginner

   Notes: See http://www-users.cs.umn.edu/~karypis/metis/

.keywords: Partitioning, create, context

.seealso: MatPartitioningSetType(), MatPartitioningType

M*/

PetscErrorCode  MatPartitioningCreate_Parmetis(MatPartitioning part)
{
  MatPartitioning_Parmetis *parmetis;

  PetscNewLog(part,MatPartitioning_Parmetis,&parmetis);
  part->data = (void*)parmetis;

  parmetis->cuts       = 0;   /* output variable */
  parmetis->foldfactor = 150; /* folding factor */
  parmetis->parallel   = 2;   /* use parallel partitioner for coarse grid */
  parmetis->indexing   = 0;   /* index numbering starts from 0 */
  parmetis->printout   = 0;   /* print no output while running */

  MPI_Comm_dup(((PetscObject)part)->comm,&(parmetis->comm_pmetis));

  part->ops->apply          = MatPartitioningApply_Parmetis;
  part->ops->view           = MatPartitioningView_Parmetis;
  part->ops->destroy        = MatPartitioningDestroy_Parmetis;
  part->ops->setfromoptions = MatPartitioningSetFromOptions_Parmetis;
  return(0);
}
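/*
   Editorial sketch (not part of the original source): typical use of this
   partitioner through the generic MatPartitioning interface, assuming "adj" is
   an existing parallel adjacency matrix (MATMPIADJ) or a matrix that can be
   converted to one:

      MatPartitioning part;
      IS              is;
      MatPartitioningCreate(PETSC_COMM_WORLD,&part);
      MatPartitioningSetAdjacency(part,adj);
      MatPartitioningSetType(part,MATPARTITIONINGPARMETIS);
      MatPartitioningSetFromOptions(part);
      MatPartitioningApply(part,&is);
      ...
      ISDestroy(&is);
      MatPartitioningDestroy(&part);
*/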

/*@
   MatMeshToVertexGraph - This routine is not implemented because ParMETIS does not provide the needed functionality. It would use the
                       ParMETIS package to convert a Mat that represents a mesh to a Mat that represents the graph of the coupling
                       between vertices of the cells, suitable for partitioning with the MatPartitioning object. Use this to partition
                       vertices of a mesh. More likely you should use MatMeshToCellGraph()

   Collective on Mat

   Input Parameters:
+     mesh - the graph that represents the mesh
-     ncommonnodes - mesh elements that share this number of common nodes are considered neighbors, use 2 for triangles and
                     quadrilaterals, 3 for tetrahedra and 4 for hexahedra

   Output Parameter:
.     dual - the dual graph

   Notes:
     ParMETIS provides only ParMETIS_V3_Mesh2Dual(), which builds the cell (dual) graph, so this routine simply raises an error

     The columns of each row of the Mat mesh are the global vertex numbers of the vertices of that row's cell. The number of rows in mesh is
     the number of cells, the number of columns is the number of vertices.

   Level: advanced

.seealso: MatMeshToCellGraph(), MatCreateMPIAdj(), MatPartitioningCreate()

@*/
PetscErrorCode MatMeshToVertexGraph(Mat mesh,PetscInt ncommonnodes,Mat *dual)
{
  SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ParMETIS does not provide this functionality");
  return(0);
}

/*@
     MatMeshToCellGraph - Uses the ParMETIS package to convert a Mat that represents a mesh to a Mat that represents the graph of the coupling
                       between cells (the "dual" graph) and is suitable for partitioning with the MatPartitioning object. Use this to partition
                       cells of a mesh.

   Collective on Mat

   Input Parameters:
+     mesh - the graph that represents the mesh
-     ncommonnodes - mesh elements that share this number of common nodes are considered neighbors, use 2 for triangles and
                     quadrilaterals, 3 for tetrahedra and 4 for hexahedra

   Output Parameter:
.     dual - the dual graph

   Notes:
     Currently requires ParMetis to be installed and uses ParMETIS_V3_Mesh2Dual()

     The columns of each row of the Mat mesh are the global vertex numbers of the vertices of that row's cell. The number of rows in mesh is
     the number of cells, the number of columns is the number of vertices.

   Level: advanced

.seealso: MatMeshToVertexGraph(), MatCreateMPIAdj(), MatPartitioningCreate()

@*/
PetscErrorCode MatMeshToCellGraph(Mat mesh,PetscInt ncommonnodes,Mat *dual)
{
  PetscErrorCode           ierr;
  int                      *newxadj,*newadjncy;
  int                      numflag=0;
  Mat_MPIAdj               *adj = (Mat_MPIAdj *)mesh->data,*newadj;
  PetscBool                flg;

  PetscTypeCompare((PetscObject)mesh,MATMPIADJ,&flg);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Must use MPIAdj matrix type");

  /* ParMETIS has no error conditions ??? */
  CHKMEMQ;
  ParMETIS_V3_Mesh2Dual(mesh->rmap->range,adj->i,adj->j,&numflag,&ncommonnodes,&newxadj,&newadjncy,&((PetscObject)mesh)->comm);
  CHKMEMQ;
  MatCreateMPIAdj(((PetscObject)mesh)->comm,mesh->rmap->n,mesh->rmap->N,newxadj,newadjncy,PETSC_NULL,dual);
  newadj = (Mat_MPIAdj *)(*dual)->data;
  newadj->freeaijwithfree = PETSC_TRUE; /* signal the matrix should be freed with system free since space was allocated by ParMETIS */
  return(0);
}
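/*
   Editorial sketch (not part of the original source): typical use of
   MatMeshToCellGraph() to partition the cells of a mesh, assuming "ii"/"jj"
   hold the vertex lists of the locally owned cells in MPIAdj (CSR-like) form
   and "ncells_local"/"nvertices_global" are placeholder sizes:

      Mat             mesh,dual;
      MatPartitioning part;
      IS              is;
      MatCreateMPIAdj(comm,ncells_local,nvertices_global,ii,jj,PETSC_NULL,&mesh);
      MatMeshToCellGraph(mesh,2,&dual);          -- 2 common nodes: triangles/quadrilaterals
      MatPartitioningCreate(comm,&part);
      MatPartitioningSetAdjacency(part,dual);
      MatPartitioningSetFromOptions(part);
      MatPartitioningApply(part,&is);
*/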