Actual source code: isltog.c

  2: #include <petscvec.h>   /*I "petscvec.h" I*/
  3: #include <private/isimpl.h>    /*I "petscis.h"  I*/

  5: PetscClassId  IS_LTOGM_CLASSID;

  9: /*@C
 10:     ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.

 12:     Not Collective

 14:     Input Parameter:
 15: .   ltog - local to global mapping

 17:     Output Parameter:
 18: .   n - the number of entries in the local mapping

 20:     Level: advanced

 22:     Concepts: mapping^local to global

 24: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 25: @*/
 26: PetscErrorCode  ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,PetscInt *n)
 27: {
 31:   *n = mapping->n;
 32:   return(0);
 33: }

 37: /*@C
 38:     ISLocalToGlobalMappingView - View a local to global mapping

 40:     Not Collective

 42:     Input Parameters:
 43: +   ltog - local to global mapping
 44: -   viewer - viewer

 46:     Level: advanced

 48:     Concepts: mapping^local to global

 50: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 51: @*/
 52: PetscErrorCode  ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
 53: {
 54:   PetscInt        i;
 55:   PetscMPIInt     rank;
 56:   PetscBool       iascii;
 57:   PetscErrorCode  ierr;

 61:   if (!viewer) {
 62:     PetscViewerASCIIGetStdout(((PetscObject)mapping)->comm,&viewer);
 63:   }

 66:   MPI_Comm_rank(((PetscObject)mapping)->comm,&rank);
 67:   PetscTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
 68:   if (iascii) {
 69:     PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);
 70:     for (i=0; i<mapping->n; i++) {
 71:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);
 72:     }
 73:     PetscViewerFlush(viewer);
 74:     PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);
 75:   } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
 76:   return(0);
 77: }
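
/*
   A minimal usage sketch (illustrative; not part of isltog.c): view a mapping
   "ltog" that is assumed to exist on PETSC_COMM_WORLD, with a PetscErrorCode
   "ierr" already declared. The output is one "[rank] local global" line per
   entry, as produced by the loop above.

      ierr = ISLocalToGlobalMappingView(ltog,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
*/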

 81: /*@
 82:     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
 83:     ordering and a global parallel ordering.

 85:     Not collective

 87:     Input Parameter:
 88: .   is - index set containing the global numbers for each local number

 90:     Output Parameter:
 91: .   mapping - new mapping data structure

 93:     Level: advanced

 95:     Concepts: mapping^local to global

 97: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 98: @*/
 99: PetscErrorCode  ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
100: {
102:   PetscInt       n;
103:   const PetscInt *indices;
104:   MPI_Comm       comm;


110:   PetscObjectGetComm((PetscObject)is,&comm);
111:   ISGetLocalSize(is,&n);
112:   ISGetIndices(is,&indices);
113:   ISLocalToGlobalMappingCreate(comm,n,indices,PETSC_COPY_VALUES,mapping);
114:   ISRestoreIndices(is,&indices);
115:   return(0);
116: }
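
/*
   A minimal usage sketch (illustrative; not part of isltog.c): build a mapping
   from an IS listing hypothetical global numbers for each local entry. The
   mapping copies the indices, so the IS can be destroyed afterwards.

      IS                     is;
      ISLocalToGlobalMapping ltog;
      PetscInt               gidx[3] = {2,5,7};
      PetscErrorCode         ierr;

      ierr = ISCreateGeneral(PETSC_COMM_SELF,3,gidx,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingCreateIS(is,&ltog);CHKERRQ(ierr);
      ierr = ISDestroy(&is);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingDestroy(&ltog);CHKERRQ(ierr);
*/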


121: /*@
122:     ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
123:     ordering and a global parallel ordering.

125:     Not Collective, but communicator may have more than one process

127:     Input Parameters:
128: +   comm - MPI communicator
129: .   n - the number of local elements
130: .   indices - the global index for each local element
131: -   mode - see PetscCopyMode

133:     Output Parameter:
134: .   mapping - new mapping data structure

136:     Level: advanced

138:     Concepts: mapping^local to global

140: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS()
141: @*/
142: PetscErrorCode  ISLocalToGlobalMappingCreate(MPI_Comm cm,PetscInt n,const PetscInt indices[],PetscCopyMode mode,ISLocalToGlobalMapping *mapping)
143: {
145:   PetscInt       *in;


151:   *mapping = PETSC_NULL;
152: #ifndef PETSC_USE_DYNAMIC_LIBRARIES
153:   ISInitializePackage(PETSC_NULL);
154: #endif

156:   PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_CLASSID,0,"ISLocalToGlobalMapping","Local to global mapping","IS",
157:                            cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);
158:   (*mapping)->n       = n;
159:   /*
 160:     Do not create the global to local mapping. It is only created the first time
 161:     ISGlobalToLocalMappingApply() is called.
162:   */
163:   (*mapping)->globals = 0;
164:   if (mode == PETSC_COPY_VALUES) {
165:     PetscMalloc(n*sizeof(PetscInt),&in);
166:     PetscMemcpy(in,indices,n*sizeof(PetscInt));
167:     PetscLogObjectMemory(*mapping,n*sizeof(PetscInt));
168:     (*mapping)->indices = in;
169:   } else if (mode == PETSC_OWN_POINTER) {
170:     (*mapping)->indices = (PetscInt*)indices;
171:   } else SETERRQ(cm,PETSC_ERR_SUP,"Cannot currently use PETSC_USE_POINTER");
172:   return(0);
173: }
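
/*
   A minimal usage sketch (illustrative; not part of isltog.c): each process
   declares hypothetical global numbers for its 4 local points; with
   PETSC_COPY_VALUES the caller keeps ownership of the index array.

      PetscInt               gidx[4] = {0,3,9,12};
      ISLocalToGlobalMapping ltog;
      PetscErrorCode         ierr;

      ierr = ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,4,gidx,PETSC_COPY_VALUES,&ltog);CHKERRQ(ierr);
      // ... use ltog, e.g. with VecSetLocalToGlobalMapping() or ISLocalToGlobalMappingApply() ...
      ierr = ISLocalToGlobalMappingDestroy(&ltog);CHKERRQ(ierr);
*/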

177: /*@
178:     ISLocalToGlobalMappingBlock - Creates a blocked index version of an 
179:        ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
180:        and VecSetLocalToGlobalMappingBlock().

182:     Not Collective, but communicator may have more than one process

184:     Input Parameters:
185: +    inmap - original point-wise mapping
186: -    bs - block size

188:     Output Parameter:
189: .   outmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries.

191:     Level: advanced

193:     Concepts: mapping^local to global

195: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
196: @*/
197: PetscErrorCode  ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap)
198: {
200:   PetscInt       *ii,i,n;

205:   if (bs > 1) {
206:     n    = inmap->n/bs;
207:     if (n*bs != inmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Pointwise mapping length is not divisible by block size");
208:     PetscMalloc(n*sizeof(PetscInt),&ii);
209:     for (i=0; i<n; i++) {
210:       ii[i] = inmap->indices[bs*i]/bs;
211:     }
212:     ISLocalToGlobalMappingCreate(((PetscObject)inmap)->comm,n,ii,PETSC_OWN_POINTER,outmap);
213:   } else {
214:     PetscObjectReference((PetscObject)inmap);
215:     *outmap = inmap;
216:   }
217:   return(0);
218: }
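
/*
   A worked sketch (illustrative; not part of isltog.c): with bs = 2 and a
   mapping "ltog" whose pointwise indices are {10,11,4,5}, the block mapping has
   n = 4/2 = 2 entries, ii = {10/2, 4/2} = {5,2}; the result indexes BLOCKS, not
   vector entries. Assumes ltog and a PetscErrorCode ierr already exist.

      ISLocalToGlobalMapping bltog;

      ierr = ISLocalToGlobalMappingBlock(ltog,2,&bltog);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingDestroy(&bltog);CHKERRQ(ierr);
*/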

222: /*@
223:     ISLocalToGlobalMappingUnBlock - Creates a scalar index version of a blocked
224:        ISLocalToGlobalMapping

226:     Not Collective, but communicator may have more than one process

 228:     Input Parameters:
229: + inmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries.
230: - bs - block size

232:     Output Parameter:
233: .   outmap - pointwise mapping

235:     Level: advanced

237:     Concepts: mapping^local to global

239: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingBlock()
240: @*/
241: PetscErrorCode  ISLocalToGlobalMappingUnBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap)
242: {
244:   PetscInt       *ii,i,n;

249:   if (bs > 1) {
250:     n    = inmap->n*bs;
251:     PetscMalloc(n*sizeof(PetscInt),&ii);
252:     for (i=0; i<n; i++) {
253:       ii[i] = inmap->indices[i/bs]*bs + (i%bs);
254:     }
255:     ISLocalToGlobalMappingCreate(((PetscObject)inmap)->comm,n,ii,PETSC_OWN_POINTER,outmap);
256:   } else {
257:     PetscObjectReference((PetscObject)inmap);
258:     *outmap = inmap;
259:   }
260:   return(0);
261: }
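
/*
   A worked sketch (illustrative; not part of isltog.c): unblocking a block
   mapping "bltog" with indices {5,2} and bs = 2 recovers the pointwise indices
   {5*2+0, 5*2+1, 2*2+0, 2*2+1} = {10,11,4,5}. Assumes bltog and ierr exist.

      ISLocalToGlobalMapping pltog;

      ierr = ISLocalToGlobalMappingUnBlock(bltog,2,&pltog);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingDestroy(&pltog);CHKERRQ(ierr);
*/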

265: /*@
266:    ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
267:    ordering and a global parallel ordering.

 269:    Not Collective

 271:    Input Parameter:
272: .  mapping - mapping data structure

274:    Level: advanced

276: .seealso: ISLocalToGlobalMappingCreate()
277: @*/
278: PetscErrorCode  ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping *mapping)
279: {
282:   if (!*mapping) return(0);
284:   if (--((PetscObject)(*mapping))->refct > 0) {*mapping = 0;return(0);}
285:   PetscFree((*mapping)->indices);
286:   PetscFree((*mapping)->globals);
287:   PetscHeaderDestroy(mapping);
288:   *mapping = 0;
289:   return(0);
290: }
291: 
294: /*@
295:     ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
296:     a new index set using the global numbering defined in an ISLocalToGlobalMapping
297:     context.

299:     Not collective

301:     Input Parameters:
302: +   mapping - mapping between local and global numbering
303: -   is - index set in local numbering

 305:     Output Parameter:
306: .   newis - index set in global numbering

308:     Level: advanced

310:     Concepts: mapping^local to global

312: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
313:           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
314: @*/
315: PetscErrorCode  ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
316: {
318:   PetscInt       n,i,*idxmap,*idxout,Nmax = mapping->n;
319:   const PetscInt *idxin;


326:   ISGetLocalSize(is,&n);
327:   ISGetIndices(is,&idxin);
328:   idxmap = mapping->indices;
329: 
330:   PetscMalloc(n*sizeof(PetscInt),&idxout);
331:   for (i=0; i<n; i++) {
332:     if (idxin[i] >= Nmax) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax-1,i);
333:     idxout[i] = idxmap[idxin[i]];
334:   }
335:   ISRestoreIndices(is,&idxin);
336:   ISCreateGeneral(PETSC_COMM_SELF,n,idxout,PETSC_OWN_POINTER,newis);
337:   return(0);
338: }
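
/*
   A minimal usage sketch (illustrative; not part of isltog.c): assuming a
   mapping "ltog" built from global indices {2,5,7} and a PetscErrorCode "ierr",
   applying it to an IS holding the local indices {0,2} produces an IS holding
   the global indices {2,7}.

      IS       locis,globis;
      PetscInt lidx[2] = {0,2};

      ierr = ISCreateGeneral(PETSC_COMM_SELF,2,lidx,PETSC_COPY_VALUES,&locis);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingApplyIS(ltog,locis,&globis);CHKERRQ(ierr);
      ierr = ISDestroy(&locis);CHKERRQ(ierr);
      ierr = ISDestroy(&globis);CHKERRQ(ierr);
*/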

340: /*MC
341:    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
342:    and converts them to the global numbering.

344:    Synopsis:
 345:    PetscErrorCode ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,PetscInt N,PetscInt in[],PetscInt out[])

347:    Not collective

349:    Input Parameters:
350: +  mapping - the local to global mapping context
351: .  N - number of integers
352: -  in - input indices in local numbering

354:    Output Parameter:
355: .  out - indices in global numbering

357:    Notes: 
358:    The in and out array parameters may be identical.

360:    Level: advanced

362: .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(), 
363:           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
364:           AOPetscToApplication(), ISGlobalToLocalMappingApply()

366:     Concepts: mapping^local to global

368: M*/
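
/*
   A minimal usage sketch (illustrative; not part of isltog.c): assuming a
   mapping "ltog" built from global indices {2,5,7} and a PetscErrorCode "ierr",
   convert two local indices in place (the input and output arrays may be
   identical, as noted above).

      PetscInt idx[2] = {1,0};

      ierr = ISLocalToGlobalMappingApply(ltog,2,idx,idx);CHKERRQ(ierr);   // idx becomes {5,2}
*/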

370: /* -----------------------------------------------------------------------------------------*/

374: /*
375:     Creates the global fields in the ISLocalToGlobalMapping structure
376: */
377: static PetscErrorCode ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
378: {
380:   PetscInt       i,*idx = mapping->indices,n = mapping->n,end,start,*globals;

383:   end   = 0;
384:   start = 100000000;

386:   for (i=0; i<n; i++) {
387:     if (idx[i] < 0) continue;
388:     if (idx[i] < start) start = idx[i];
389:     if (idx[i] > end)   end   = idx[i];
390:   }
391:   if (start > end) {start = 0; end = -1;}
392:   mapping->globalstart = start;
393:   mapping->globalend   = end;

395:   PetscMalloc((end-start+2)*sizeof(PetscInt),&globals);
396:   mapping->globals = globals;
397:   for (i=0; i<end-start+1; i++) {
398:     globals[i] = -1;
399:   }
400:   for (i=0; i<n; i++) {
401:     if (idx[i] < 0) continue;
402:     globals[idx[i] - start] = i;
403:   }

405:   PetscLogObjectMemory(mapping,(end-start+1)*sizeof(PetscInt));
406:   return(0);
407: }
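
/*
   A worked sketch of the table built above (illustrative; not part of isltog.c):
   for local-to-global indices {5,2,7} the range is start = 2, end = 7, and the
   globals array of length end-start+1 = 6 becomes {1,-1,-1,0,-1,2}. Entry
   g-start holds the local index of global index g, or -1 if g is not held by
   this process.
*/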

411: /*@
412:     ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
413:     specified with a global numbering.

415:     Not collective

417:     Input Parameters:
418: +   mapping - mapping between local and global numbering
419: .   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
420:            IS_GTOLM_DROP - drops the indices with no local value from the output list
421: .   n - number of global indices to map
422: -   idx - global indices to map

424:     Output Parameters:
425: +   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
 426: -   idxout - local index of each global index; one must pass in an array long enough 
427:              to hold all the indices. You can call ISGlobalToLocalMappingApply() with 
428:              idxout == PETSC_NULL to determine the required length (returned in nout)
429:              and then allocate the required space and call ISGlobalToLocalMappingApply()
430:              a second time to set the values.

432:     Notes:
433:     Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.

 435:     This is not scalable in memory usage. Each processor requires an array of size 
 436:     O(Nglobal) to compute these.

438:     Level: advanced

440:     Concepts: mapping^global to local

442: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
443:           ISLocalToGlobalMappingDestroy()
444: @*/
445: PetscErrorCode  ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
446:                                   PetscInt n,const PetscInt idx[],PetscInt *nout,PetscInt idxout[])
447: {
448:   PetscInt       i,*globals,nf = 0,tmp,start,end;

453:   if (!mapping->globals) {
454:     ISGlobalToLocalMappingSetUp_Private(mapping);
455:   }
456:   globals = mapping->globals;
457:   start   = mapping->globalstart;
458:   end     = mapping->globalend;

460:   if (type == IS_GTOLM_MASK) {
461:     if (idxout) {
462:       for (i=0; i<n; i++) {
463:         if (idx[i] < 0) idxout[i] = idx[i];
464:         else if (idx[i] < start) idxout[i] = -1;
465:         else if (idx[i] > end)   idxout[i] = -1;
466:         else                     idxout[i] = globals[idx[i] - start];
467:       }
468:     }
469:     if (nout) *nout = n;
470:   } else {
471:     if (idxout) {
472:       for (i=0; i<n; i++) {
473:         if (idx[i] < 0) continue;
474:         if (idx[i] < start) continue;
475:         if (idx[i] > end) continue;
476:         tmp = globals[idx[i] - start];
477:         if (tmp < 0) continue;
478:         idxout[nf++] = tmp;
479:       }
480:     } else {
481:       for (i=0; i<n; i++) {
482:         if (idx[i] < 0) continue;
483:         if (idx[i] < start) continue;
484:         if (idx[i] > end) continue;
485:         tmp = globals[idx[i] - start];
486:         if (tmp < 0) continue;
487:         nf++;
488:       }
489:     }
490:     if (nout) *nout = nf;
491:   }

493:   return(0);
494: }
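
/*
   A minimal sketch of the two-pass pattern described above (illustrative; not
   part of isltog.c): assuming a mapping "ltog" built from global indices
   {2,5,7} and a PetscErrorCode "ierr", first query how many of the requested
   global indices are known locally, then convert them with IS_GTOLM_DROP.

      PetscInt gidx[3] = {2,8,7},nout,*lidx;

      ierr = ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,3,gidx,&nout,PETSC_NULL);CHKERRQ(ierr);
      ierr = PetscMalloc(nout*sizeof(PetscInt),&lidx);CHKERRQ(ierr);
      ierr = ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,3,gidx,&nout,lidx);CHKERRQ(ierr);
      // here nout == 2 and lidx == {0,2}; the unknown global index 8 was dropped
      ierr = PetscFree(lidx);CHKERRQ(ierr);
*/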

498: /*@C
 499:     ISLocalToGlobalMappingGetInfo - Gets the list of processors that share indices with this one,
 500:      and for each such neighbor the shared indices (those held by more than one processor)

502:     Collective on ISLocalToGlobalMapping

 504:     Input Parameter:
505: .   mapping - the mapping from local to global indexing

 507:     Output Parameters:
 508: +   nproc - number of processors that are connected to this one
 509: .   procs - neighboring processors
 510: .   numprocs - number of indices shared with each neighboring processor (subdomain)
 511: -   indices - indices of nodes (in local numbering) shared with neighbors (sorted by global numbering)

513:     Level: advanced

515:     Concepts: mapping^local to global

517:     Fortran Usage: 
518: $        ISLocalToGlobalMpngGetInfoSize(ISLocalToGlobalMapping,PetscInt nproc,PetscInt numprocmax,ierr) followed by 
519: $        ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping,PetscInt nproc, PetscInt procs[nproc],PetscInt numprocs[nproc],
520:           PetscInt indices[nproc][numprocmax],ierr)
521:         There is no ISLocalToGlobalMappingRestoreInfo() in Fortran. You must make sure that procs[], numprocs[] and 
 522:         indices[][] are large enough, either by allocating them dynamically or by declaring them statically with sufficient size.


525: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
526:           ISLocalToGlobalMappingRestoreInfo()
527: @*/
528: PetscErrorCode  ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
529: {
531:   PetscMPIInt    size,rank,tag1,tag2,tag3,*len,*source,imdex;
532:   PetscInt       i,n = mapping->n,Ng,ng,max = 0,*lindices = mapping->indices;
533:   PetscInt       *nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc;
534:   PetscInt       cnt,scale,*ownedsenders,*nownedsenders,rstart,nowned;
535:   PetscInt       node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
536:   PetscInt       first_procs,first_numprocs,*first_indices;
537:   MPI_Request    *recv_waits,*send_waits;
538:   MPI_Status     recv_status,*send_status,*recv_statuses;
539:   MPI_Comm       comm = ((PetscObject)mapping)->comm;
540:   PetscBool      debug = PETSC_FALSE;

544:   MPI_Comm_size(comm,&size);
545:   MPI_Comm_rank(comm,&rank);
546:   if (size == 1) {
547:     *nproc         = 0;
548:     *procs         = PETSC_NULL;
549:     PetscMalloc(sizeof(PetscInt),numprocs);
550:     (*numprocs)[0] = 0;
551:     PetscMalloc(sizeof(PetscInt*),indices);
552:     (*indices)[0]  = PETSC_NULL;
553:     return(0);
554:   }

556:   PetscOptionsGetBool(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug,PETSC_NULL);

558:   /*
559:     Notes on ISLocalToGlobalMappingGetInfo

 561:     globally owned node - a node that has been assigned to this processor in the global
 562:            numbering, just for this routine.

 564:     nontrivial globally owned node - a node assigned to this processor that is on a subdomain
 565:            boundary (i.e., it has more than one local owner)

 567:     locally owned node - a node that exists on this processor's subdomain

 569:     nontrivial locally owned node - a node that is not in the interior (i.e., it belongs to more than
 570:            one local subdomain)
571:   */
572:   PetscObjectGetNewTag((PetscObject)mapping,&tag1);
573:   PetscObjectGetNewTag((PetscObject)mapping,&tag2);
574:   PetscObjectGetNewTag((PetscObject)mapping,&tag3);

576:   for (i=0; i<n; i++) {
577:     if (lindices[i] > max) max = lindices[i];
578:   }
579:   MPI_Allreduce(&max,&Ng,1,MPIU_INT,MPI_MAX,comm);
580:   Ng++;
581:   MPI_Comm_size(comm,&size);
582:   MPI_Comm_rank(comm,&rank);
583:   scale  = Ng/size + 1;
584:   ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
585:   rstart = scale*rank;

587:   /* determine ownership ranges of global indices */
588:   PetscMalloc(2*size*sizeof(PetscInt),&nprocs);
589:   PetscMemzero(nprocs,2*size*sizeof(PetscInt));

591:   /* determine owners of each local node  */
592:   PetscMalloc(n*sizeof(PetscInt),&owner);
593:   for (i=0; i<n; i++) {
594:     proc             = lindices[i]/scale; /* processor that globally owns this index */
595:     nprocs[2*proc+1] = 1;                 /* processor globally owns at least one of ours */
596:     owner[i]         = proc;
597:     nprocs[2*proc]++;                     /* count of how many that processor globally owns of ours */
598:   }
599:   nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1];
600:   PetscInfo1(mapping,"Number of global owners for my local data %d\n",nsends);

602:   /* inform other processors of number of messages and max length*/
603:   PetscMaxSum(comm,nprocs,&nmax,&nrecvs);
604:   PetscInfo1(mapping,"Number of local owners for my global data %d\n",nrecvs);

606:   /* post receives for owned rows */
607:   PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(PetscInt),&recvs);
608:   PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);
609:   for (i=0; i<nrecvs; i++) {
610:     MPI_Irecv(recvs+2*nmax*i,2*nmax,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);
611:   }

613:   /* pack messages containing lists of local nodes to owners */
614:   PetscMalloc((2*n+1)*sizeof(PetscInt),&sends);
615:   PetscMalloc((size+1)*sizeof(PetscInt),&starts);
616:   starts[0]  = 0;
617:   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
618:   for (i=0; i<n; i++) {
619:     sends[starts[owner[i]]++] = lindices[i];
620:     sends[starts[owner[i]]++] = i;
621:   }
622:   PetscFree(owner);
623:   starts[0]  = 0;
624:   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}

626:   /* send the messages */
627:   PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);
628:   PetscMalloc((nsends+1)*sizeof(PetscInt),&dest);
629:   cnt = 0;
630:   for (i=0; i<size; i++) {
631:     if (nprocs[2*i]) {
632:       MPI_Isend(sends+starts[i],2*nprocs[2*i],MPIU_INT,i,tag1,comm,send_waits+cnt);
633:       dest[cnt] = i;
634:       cnt++;
635:     }
636:   }
637:   PetscFree(starts);

639:   /* wait on receives */
640:   PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&source);
641:   PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&len);
642:   cnt  = nrecvs;
643:   PetscMalloc((ng+1)*sizeof(PetscInt),&nownedsenders);
644:   PetscMemzero(nownedsenders,ng*sizeof(PetscInt));
645:   while (cnt) {
646:     MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
647:     /* unpack receives into our local space */
648:     MPI_Get_count(&recv_status,MPIU_INT,&len[imdex]);
649:     source[imdex]  = recv_status.MPI_SOURCE;
650:     len[imdex]     = len[imdex]/2;
651:     /* count how many local owners for each of my global owned indices */
652:     for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
653:     cnt--;
654:   }
655:   PetscFree(recv_waits);

 657:   /* count the globally owned indices that are shared (nowned) and, summed over them, the number of processors sharing each (nownedm) */
658:   nowned  = 0;
659:   nownedm = 0;
660:   for (i=0; i<ng; i++) {
661:     if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
662:   }

664:   /* create single array to contain rank of all local owners of each globally owned index */
665:   PetscMalloc((nownedm+1)*sizeof(PetscInt),&ownedsenders);
666:   PetscMalloc((ng+1)*sizeof(PetscInt),&starts);
667:   starts[0] = 0;
668:   for (i=1; i<ng; i++) {
669:     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
670:     else starts[i] = starts[i-1];
671:   }

 673:   /* for each nontrivial globally owned node, list all processors that reported it */
674:   for (i=0; i<nrecvs; i++) {
675:     for (j=0; j<len[i]; j++) {
676:       node = recvs[2*i*nmax+2*j]-rstart;
677:       if (nownedsenders[node] > 1) {
678:         ownedsenders[starts[node]++] = source[i];
679:       }
680:     }
681:   }

683:   if (debug) { /* -----------------------------------  */
684:     starts[0]    = 0;
685:     for (i=1; i<ng; i++) {
686:       if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
687:       else starts[i] = starts[i-1];
688:     }
689:     for (i=0; i<ng; i++) {
690:       if (nownedsenders[i] > 1) {
691:         PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);
692:         for (j=0; j<nownedsenders[i]; j++) {
693:           PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);
694:         }
695:         PetscSynchronizedPrintf(comm,"\n");
696:       }
697:     }
698:     PetscSynchronizedFlush(comm);
699:   }/* -----------------------------------  */

701:   /* wait on original sends */
702:   if (nsends) {
703:     PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
704:     MPI_Waitall(nsends,send_waits,send_status);
705:     PetscFree(send_status);
706:   }
707:   PetscFree(send_waits);
708:   PetscFree(sends);
709:   PetscFree(nprocs);

711:   /* pack messages to send back to local owners */
712:   starts[0]    = 0;
713:   for (i=1; i<ng; i++) {
714:     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
715:     else starts[i] = starts[i-1];
716:   }
717:   nsends2 = nrecvs;
718:   PetscMalloc((nsends2+1)*sizeof(PetscInt),&nprocs); /* length of each message */
719:   for (i=0; i<nrecvs; i++) {
720:     nprocs[i] = 1;
721:     for (j=0; j<len[i]; j++) {
722:       node = recvs[2*i*nmax+2*j]-rstart;
723:       if (nownedsenders[node] > 1) {
724:         nprocs[i] += 2 + nownedsenders[node];
725:       }
726:     }
727:   }
728:   nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
729:   PetscMalloc((nt+1)*sizeof(PetscInt),&sends2);
730:   PetscMalloc((nsends2+1)*sizeof(PetscInt),&starts2);
731:   starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
 732:   /*
 733:      Each message is nprocs[i] long and consists of
 734:        (0) the number of nodes being sent back, followed, for each such node, by
 735:        (1) the local node number,
 736:        (2) the number of processors sharing it,
 737:        (3) the list of processors sharing it
 738:   */
739:   for (i=0; i<nsends2; i++) {
740:     cnt = 1;
741:     sends2[starts2[i]] = 0;
742:     for (j=0; j<len[i]; j++) {
743:       node = recvs[2*i*nmax+2*j]-rstart;
744:       if (nownedsenders[node] > 1) {
745:         sends2[starts2[i]]++;
746:         sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
747:         sends2[starts2[i]+cnt++] = nownedsenders[node];
748:         PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(PetscInt));
749:         cnt += nownedsenders[node];
750:       }
751:     }
752:   }

754:   /* receive the message lengths */
755:   nrecvs2 = nsends;
756:   PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&lens2);
757:   PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&starts3);
758:   PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);
759:   for (i=0; i<nrecvs2; i++) {
760:     MPI_Irecv(&lens2[i],1,MPIU_INT,dest[i],tag2,comm,recv_waits+i);
761:   }

763:   /* send the message lengths */
764:   for (i=0; i<nsends2; i++) {
765:     MPI_Send(&nprocs[i],1,MPIU_INT,source[i],tag2,comm);
766:   }

768:   /* wait on receives of lens */
769:   if (nrecvs2) {
770:     PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);
771:     MPI_Waitall(nrecvs2,recv_waits,recv_statuses);
772:     PetscFree(recv_statuses);
773:   }
774:   PetscFree(recv_waits);

776:   starts3[0] = 0;
777:   nt         = 0;
778:   for (i=0; i<nrecvs2-1; i++) {
779:     starts3[i+1] = starts3[i] + lens2[i];
780:     nt          += lens2[i];
781:   }
782:   nt += lens2[nrecvs2-1];

784:   PetscMalloc((nt+1)*sizeof(PetscInt),&recvs2);
785:   PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);
786:   for (i=0; i<nrecvs2; i++) {
787:     MPI_Irecv(recvs2+starts3[i],lens2[i],MPIU_INT,dest[i],tag3,comm,recv_waits+i);
788:   }
789: 
790:   /* send the messages */
791:   PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);
792:   for (i=0; i<nsends2; i++) {
793:     MPI_Isend(sends2+starts2[i],nprocs[i],MPIU_INT,source[i],tag3,comm,send_waits+i);
794:   }

796:   /* wait on receives */
797:   if (nrecvs2) {
798:     PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);
799:     MPI_Waitall(nrecvs2,recv_waits,recv_statuses);
800:     PetscFree(recv_statuses);
801:   }
802:   PetscFree(recv_waits);
803:   PetscFree(nprocs);

805:   if (debug) { /* -----------------------------------  */
806:     cnt = 0;
807:     for (i=0; i<nrecvs2; i++) {
808:       nt = recvs2[cnt++];
809:       for (j=0; j<nt; j++) {
810:         PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);
811:         for (k=0; k<recvs2[cnt+1]; k++) {
812:           PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);
813:         }
814:         cnt += 2 + recvs2[cnt+1];
815:         PetscSynchronizedPrintf(comm,"\n");
816:       }
817:     }
818:     PetscSynchronizedFlush(comm);
819:   } /* -----------------------------------  */

 821:   /* count the number of shared local nodes for each neighboring subdomain */
822:   PetscMalloc(size*sizeof(PetscInt),&nprocs);
823:   PetscMemzero(nprocs,size*sizeof(PetscInt));
824:   cnt  = 0;
825:   for (i=0; i<nrecvs2; i++) {
826:     nt = recvs2[cnt++];
827:     for (j=0; j<nt; j++) {
828:       for (k=0; k<recvs2[cnt+1]; k++) {
829:         nprocs[recvs2[cnt+2+k]]++;
830:       }
831:       cnt += 2 + recvs2[cnt+1];
832:     }
833:   }
834:   nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
835:   *nproc    = nt;
836:   PetscMalloc((nt+1)*sizeof(PetscInt),procs);
837:   PetscMalloc((nt+1)*sizeof(PetscInt),numprocs);
838:   PetscMalloc((nt+1)*sizeof(PetscInt*),indices);
839:   PetscMalloc(size*sizeof(PetscInt),&bprocs);
840:   cnt       = 0;
841:   for (i=0; i<size; i++) {
842:     if (nprocs[i] > 0) {
843:       bprocs[i]        = cnt;
844:       (*procs)[cnt]    = i;
845:       (*numprocs)[cnt] = nprocs[i];
846:       PetscMalloc(nprocs[i]*sizeof(PetscInt),&(*indices)[cnt]);
847:       cnt++;
848:     }
849:   }

 851:   /* build, for each neighboring subdomain, the list of nontrivial local nodes shared with it */
852:   PetscMemzero(*numprocs,nt*sizeof(PetscInt));
853:   cnt  = 0;
854:   for (i=0; i<nrecvs2; i++) {
855:     nt = recvs2[cnt++];
856:     for (j=0; j<nt; j++) {
857:       for (k=0; k<recvs2[cnt+1]; k++) {
858:         (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
859:       }
860:       cnt += 2 + recvs2[cnt+1];
861:     }
862:   }
863:   PetscFree(bprocs);
864:   PetscFree(recvs2);

 866:   /* sort each subdomain's list of shared nodes by global number */
867:   nt = *nproc;
868:   for (i=0; i<nt; i++) {
869:     PetscMalloc(((*numprocs)[i])*sizeof(PetscInt),&tmp);
870:     for (j=0; j<(*numprocs)[i]; j++) {
871:       tmp[j] = lindices[(*indices)[i][j]];
872:     }
873:     PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);
874:     PetscFree(tmp);
875:   }

877:   if (debug) { /* -----------------------------------  */
878:     nt = *nproc;
879:     for (i=0; i<nt; i++) {
880:       PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);
881:       for (j=0; j<(*numprocs)[i]; j++) {
882:         PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);
883:       }
884:       PetscSynchronizedPrintf(comm,"\n");
885:     }
886:     PetscSynchronizedFlush(comm);
887:   } /* -----------------------------------  */

889:   /* wait on sends */
890:   if (nsends2) {
891:     PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);
892:     MPI_Waitall(nsends2,send_waits,send_status);
893:     PetscFree(send_status);
894:   }

896:   PetscFree(starts3);
897:   PetscFree(dest);
898:   PetscFree(send_waits);

900:   PetscFree(nownedsenders);
901:   PetscFree(ownedsenders);
902:   PetscFree(starts);
903:   PetscFree(starts2);
904:   PetscFree(lens2);

906:   PetscFree(source);
907:   PetscFree(len);
908:   PetscFree(recvs);
909:   PetscFree(nprocs);
910:   PetscFree(sends2);

912:   /* put the information about myself as the first entry in the list */
913:   first_procs    = (*procs)[0];
914:   first_numprocs = (*numprocs)[0];
915:   first_indices  = (*indices)[0];
916:   for (i=0; i<*nproc; i++) {
917:     if ((*procs)[i] == rank) {
918:       (*procs)[0]    = (*procs)[i];
919:       (*numprocs)[0] = (*numprocs)[i];
920:       (*indices)[0]  = (*indices)[i];
921:       (*procs)[i]    = first_procs;
922:       (*numprocs)[i] = first_numprocs;
923:       (*indices)[i]  = first_indices;
924:       break;
925:     }
926:   }
927:   return(0);
928: }
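
/*
   A minimal sketch of the expected Get/Restore pairing (illustrative; not part
   of isltog.c), for a mapping "ltog" and a PetscErrorCode "ierr" that are
   assumed to exist:

      PetscInt nproc,*procs,*numprocs,**idxs;

      ierr = ISLocalToGlobalMappingGetInfo(ltog,&nproc,&procs,&numprocs,&idxs);CHKERRQ(ierr);
      // procs[0..nproc-1] lists the neighboring processors, numprocs[i] is how many
      // indices are shared with procs[i], and idxs[i][] holds those indices in local numbering
      ierr = ISLocalToGlobalMappingRestoreInfo(ltog,&nproc,&procs,&numprocs,&idxs);CHKERRQ(ierr);
*/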

932: /*@C
933:     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()

935:     Collective on ISLocalToGlobalMapping

 937:    Input Parameter:
 938: .   mapping - the mapping from local to global indexing

 940:    Output Parameters:
 941: +   nproc - number of processors that are connected to this one
 942: .   procs - neighboring processors
 943: .   numprocs - number of indices shared with each processor
 944: -   indices - indices of local nodes shared with each neighbor (sorted by global numbering)

946:     Level: advanced

948: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
949:           ISLocalToGlobalMappingGetInfo()
950: @*/
951: PetscErrorCode  ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
952: {
954:   PetscInt       i;

957:   PetscFree(*procs);
958:   PetscFree(*numprocs);
959:   if (*indices) {
960:     PetscFree((*indices)[0]);
961:     for (i=1; i<*nproc; i++) {
962:       PetscFree((*indices)[i]);
963:     }
964:     PetscFree(*indices);
965:   }
966:   return(0);
967: }

971: /*@C
972:    ISLocalToGlobalMappingGetIndices - Get global indices for every local point

974:    Not Collective

976:    Input Arguments:
977: . ltog - local to global mapping

979:    Output Arguments:
980: . array - array of indices

982:    Level: advanced

984: .seealso: ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingRestoreIndices()
985: @*/
986: PetscErrorCode  ISLocalToGlobalMappingGetIndices(ISLocalToGlobalMapping ltog,const PetscInt **array)
987: {

992:   *array = ltog->indices;
993:   return(0);
994: }

998: /*@C
 999:    ISLocalToGlobalMappingRestoreIndices - Restore indices obtained with ISLocalToGlobalMappingGetIndices()

1001:    Not Collective

1003:    Input Arguments:
1004: + ltog - local to global mapping
1005: - array - array of indices

1007:    Level: advanced

1009: .seealso: ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingGetIndices()
1010: @*/
1011: PetscErrorCode  ISLocalToGlobalMappingRestoreIndices(ISLocalToGlobalMapping ltog,const PetscInt **array)
1012: {

1017:   if (*array != ltog->indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_BADPTR,"Trying to return mismatched pointer");
1018:   *array = PETSC_NULL;
1019:   return(0);
1020: }
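
/*
   A minimal sketch of the Get/Restore pairing (illustrative; not part of
   isltog.c): the returned array is read-only and must be handed back with the
   matching restore call. Assumes a mapping "ltog" and a PetscErrorCode "ierr".

      const PetscInt *gidx;
      PetscInt       nl;

      ierr = ISLocalToGlobalMappingGetSize(ltog,&nl);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingGetIndices(ltog,&gidx);CHKERRQ(ierr);
      // gidx[i] is the global number of local point i, for i = 0,...,nl-1
      ierr = ISLocalToGlobalMappingRestoreIndices(ltog,&gidx);CHKERRQ(ierr);
*/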

1024: /*@C
1025:    ISLocalToGlobalMappingConcatenate - Create a new mapping that concatenates a list of mappings

1027:    Not Collective

1029:    Input Arguments:
1030: + comm - communicator for the new mapping, must contain the communicator of every mapping to concatenate
1031: . n - number of mappings to concatenate
1032: - ltogs - local to global mappings

1034:    Output Arguments:
1035: . ltogcat - new mapping

1037:    Level: advanced

1039: .seealso: ISLocalToGlobalMappingCreate()
1040: @*/
1041: PetscErrorCode ISLocalToGlobalMappingConcatenate(MPI_Comm comm,PetscInt n,const ISLocalToGlobalMapping ltogs[],ISLocalToGlobalMapping *ltogcat)
1042: {
1043:   PetscInt       i,cnt,m,*idx;

1047:   if (n < 0) SETERRQ1(comm,PETSC_ERR_ARG_OUTOFRANGE,"Must have a non-negative number of mappings, given %D",n);
1051:   for (cnt=0,i=0; i<n; i++) {
1052:     ISLocalToGlobalMappingGetSize(ltogs[i],&m);
1053:     cnt += m;
1054:   }
1055:   PetscMalloc(cnt*sizeof(PetscInt),&idx);
1056:   for (cnt=0,i=0; i<n; i++) {
1057:     const PetscInt *subidx;
1058:     ISLocalToGlobalMappingGetSize(ltogs[i],&m);
1059:     ISLocalToGlobalMappingGetIndices(ltogs[i],&subidx);
1060:     PetscMemcpy(&idx[cnt],subidx,m*sizeof(PetscInt));
1061:     ISLocalToGlobalMappingRestoreIndices(ltogs[i],&subidx);
1062:     cnt += m;
1063:   }
1064:   ISLocalToGlobalMappingCreate(comm,cnt,idx,PETSC_OWN_POINTER,ltogcat);
1065:   return(0);
1066: }
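
/*
   A minimal usage sketch (illustrative; not part of isltog.c): concatenating two
   hypothetical mappings ltogA and ltogB with indices {0,1} and {5,6} yields a
   mapping of size 4 with indices {0,1,5,6}, in the order the mappings are given.
   Assumes ltogA, ltogB, and a PetscErrorCode ierr already exist.

      ISLocalToGlobalMapping cat,pair[2];

      pair[0] = ltogA; pair[1] = ltogB;
      ierr = ISLocalToGlobalMappingConcatenate(PETSC_COMM_SELF,2,pair,&cat);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingDestroy(&cat);CHKERRQ(ierr);
*/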