Actual source code: subcomm.c
/*
     Provides utility routines for split MPI communicators.
*/
#include <petscsys.h>    /*I   "petscsys.h"   I*/

/* Forward declarations for the creation routines defined later in this file,
   so that PetscSubcommSetType() can call them */
extern PetscErrorCode PetscSubcommCreate_contiguous(PetscSubcomm);
extern PetscErrorCode PetscSubcommCreate_interlaced(PetscSubcomm);
/*@C
  PetscSubcommSetNumber - Set the total number of subcommunicators.

   Collective on MPI_Comm

   Input Parameters:
+  psubcomm - PetscSubcomm context
-  nsubcomm - the total number of subcommunicators in psubcomm

   Level: advanced

.keywords: communicator

.seealso: PetscSubcommCreate(), PetscSubcommDestroy(), PetscSubcommSetType(), PetscSubcommSetTypeGeneral()
@*/
PetscErrorCode PetscSubcommSetNumber(PetscSubcomm psubcomm,PetscInt nsubcomm)
{
  PetscErrorCode ierr;
  MPI_Comm       comm;
  PetscMPIInt    rank,size;

  PetscFunctionBegin;
  if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate() first");
  comm = psubcomm->parent;  /* only dereference psubcomm after the null check */
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (nsubcomm < 1 || nsubcomm > size) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Number of subcommunicators %D cannot be < 1 or > input comm size %d",nsubcomm,size);

  psubcomm->n = nsubcomm;
  PetscFunctionReturn(0);
}
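/*
   Usage sketch (illustrative, not part of subcomm.c): splitting PETSC_COMM_WORLD
   into 4 subcommunicators. The count must lie between 1 and the size of the
   parent communicator, or the range check above errors out.

      PetscSubcomm psubcomm;
      ierr = PetscSubcommCreate(PETSC_COMM_WORLD,&psubcomm);CHKERRQ(ierr);
      ierr = PetscSubcommSetNumber(psubcomm,4);CHKERRQ(ierr);
*/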
/*@C
  PetscSubcommSetType - Set the type of the subcommunicators.

   Collective on MPI_Comm

   Input Parameters:
+  psubcomm - PetscSubcomm context
-  subcommtype - subcommunicator type, PETSC_SUBCOMM_CONTIGUOUS or PETSC_SUBCOMM_INTERLACED

   Level: advanced

.keywords: communicator

.seealso: PetscSubcommCreate(), PetscSubcommDestroy(), PetscSubcommSetNumber(), PetscSubcommSetTypeGeneral()
@*/
PetscErrorCode PetscSubcommSetType(PetscSubcomm psubcomm,const PetscSubcommType subcommtype)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate() first");
  if (psubcomm->n < 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Number of subcommunicators %D is incorrect. Call PetscSubcommSetNumber() first",psubcomm->n);

  if (subcommtype == PETSC_SUBCOMM_CONTIGUOUS) {
    ierr = PetscSubcommCreate_contiguous(psubcomm);CHKERRQ(ierr);
  } else if (subcommtype == PETSC_SUBCOMM_INTERLACED) {
    ierr = PetscSubcommCreate_interlaced(psubcomm);CHKERRQ(ierr);
  } else SETERRQ1(psubcomm->parent,PETSC_ERR_SUP,"PetscSubcommType %D is not supported yet",subcommtype);
  PetscFunctionReturn(0);
}
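/*
   Usage sketch (illustrative): choosing a layout once the count is set.
   PETSC_SUBCOMM_CONTIGUOUS keeps blocks of neighboring ranks together, while
   PETSC_SUBCOMM_INTERLACED deals ranks out round-robin (see the creation
   routines below); afterwards psubcomm->comm is this process's subgroup.

      PetscMPIInt nsub;
      ierr = PetscSubcommSetType(psubcomm,PETSC_SUBCOMM_INTERLACED);CHKERRQ(ierr);
      ierr = MPI_Comm_size(psubcomm->comm,&nsub);CHKERRQ(ierr);
*/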
/*@C
  PetscSubcommSetTypeGeneral - Set the subcommunicators from the user's specification.

   Collective on MPI_Comm

   Input Parameters:
+  psubcomm - PetscSubcomm context
.  color - control of subset assignment (nonnegative integer); processes with the same color are in the same subcommunicator
.  subrank - rank in the subcommunicator
-  duprank - rank in the dupparent (see PetscSubcomm)

   Level: advanced

.keywords: communicator, create

.seealso: PetscSubcommCreate(), PetscSubcommDestroy(), PetscSubcommSetNumber(), PetscSubcommSetType()
@*/
PetscErrorCode PetscSubcommSetTypeGeneral(PetscSubcomm psubcomm,PetscMPIInt color,PetscMPIInt subrank,PetscMPIInt duprank)
{
  PetscErrorCode ierr;
  MPI_Comm       subcomm=MPI_COMM_NULL,dupcomm=MPI_COMM_NULL,comm;
  PetscMPIInt    size;

  PetscFunctionBegin;
  if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate() first");
  if (psubcomm->n < 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Number of subcommunicators %D is incorrect. Call PetscSubcommSetNumber() first",psubcomm->n);
  comm = psubcomm->parent;  /* only dereference psubcomm after the null check */

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  /* create dupcomm with the same size as comm; its rank, duprank, maps the subcomms contiguously into dupcomm.
     If duprank is not a valid rank in comm, dupcomm is not created - not all applications require dupcomm! */
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (duprank == PETSC_DECIDE) {
    SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"duprank==PETSC_DECIDE is not supported yet");
  } else if (duprank >= 0 && duprank < size) {
    ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);
    ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,PETSC_NULL);CHKERRQ(ierr);
    ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
  }
  ierr = PetscCommDuplicate(subcomm,&psubcomm->comm,PETSC_NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);

  psubcomm->color = color;
  PetscFunctionReturn(0);
}
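/*
   Usage sketch (illustrative): a custom split into even and odd ranks, assuming
   PetscSubcommSetNumber(psubcomm,2) was already called. duprank packs the even
   group before the odd group so the subgroups map contiguously into dupparent.
   Note there are (size+1)/2 even ranks, hence the offset for the odd group.

      PetscMPIInt rank,size,color,subrank,duprank;
      ierr    = MPI_Comm_rank(psubcomm->parent,&rank);CHKERRQ(ierr);
      ierr    = MPI_Comm_size(psubcomm->parent,&size);CHKERRQ(ierr);
      color   = rank%2;
      subrank = rank/2;
      duprank = color ? (size+1)/2 + subrank : subrank;
      ierr    = PetscSubcommSetTypeGeneral(psubcomm,color,subrank,duprank);CHKERRQ(ierr);
*/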
/* Destroys a PetscSubcomm context created with PetscSubcommCreate(), releasing the communicators it holds */
PetscErrorCode PetscSubcommDestroy(PetscSubcomm *psubcomm)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (!*psubcomm) PetscFunctionReturn(0);
  ierr = PetscCommDestroy(&(*psubcomm)->dupparent);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&(*psubcomm)->comm);CHKERRQ(ierr);
  ierr = PetscFree((*psubcomm));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*@C
  PetscSubcommCreate - Create a PetscSubcomm context.

   Collective on MPI_Comm

   Input Parameter:
.  comm - MPI communicator to be split

   Output Parameter:
.  psubcomm - location to store the new PetscSubcomm context

   Notes:
   This only records the parent communicator; call PetscSubcommSetNumber() and
   PetscSubcommSetType() (or PetscSubcommSetTypeGeneral()) to perform the split.

   Level: advanced

.keywords: communicator, create

.seealso: PetscSubcommDestroy()
@*/
PetscErrorCode PetscSubcommCreate(MPI_Comm comm,PetscSubcomm *psubcomm)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscNew(struct _n_PetscSubcomm,psubcomm);CHKERRQ(ierr);
  (*psubcomm)->parent = comm;
  PetscFunctionReturn(0);
}
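/*
   Usage sketch (illustrative): the full life cycle of a PetscSubcomm.

      PetscSubcomm psubcomm;
      ierr = PetscSubcommCreate(PETSC_COMM_WORLD,&psubcomm);CHKERRQ(ierr);
      ierr = PetscSubcommSetNumber(psubcomm,3);CHKERRQ(ierr);
      ierr = PetscSubcommSetType(psubcomm,PETSC_SUBCOMM_CONTIGUOUS);CHKERRQ(ierr);
      ... work with psubcomm->comm (this process's subgroup) and
          psubcomm->dupparent (a parent-sized comm reordered by duprank) ...
      ierr = PetscSubcommDestroy(&psubcomm);CHKERRQ(ierr);
*/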
PetscErrorCode PetscSubcommCreate_contiguous(PetscSubcomm psubcomm)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,*subsize,duprank=-1,subrank=-1;
  PetscInt       np_subcomm,nleftover,i,color=-1,rankstart,nsubcomm=psubcomm->n;
  MPI_Comm       subcomm=MPI_COMM_NULL,dupcomm=MPI_COMM_NULL,comm=psubcomm->parent;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* get the size of each subcommunicator; the first nleftover ones get one extra process */
  ierr = PetscMalloc((1+nsubcomm)*sizeof(PetscMPIInt),&subsize);CHKERRQ(ierr);
  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++) {
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;
  }

  /* get the color and subrank of this process: blocks of consecutive ranks share a color */
  rankstart = 0;
  for (i=0; i<nsubcomm; i++) {
    if (rank >= rankstart && rank < rankstart+subsize[i]) {
      color   = i;
      subrank = rank - rankstart;
      duprank = rank;
      break;
    } else rankstart += subsize[i];
  }
  ierr = PetscFree(subsize);CHKERRQ(ierr);

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  /* create dupcomm with the same size as comm; its rank, duprank, maps the subcomms contiguously into dupcomm */
  ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);

  ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(subcomm,&psubcomm->comm,PETSC_NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);

  psubcomm->color = color;
  PetscFunctionReturn(0);
}
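/*
   Worked example of the contiguous layout (illustrative, for comparison with the
   interlaced note below): size=4, nsubcomm=3 gives subsize[] = {2,1,1}, hence

      rank:   [0]  [1]  [2]  [3]
      color:   0    0    1    2

   and duprank == rank, so dupcomm orders the processes exactly as comm does.
*/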
/*
   Note:
   In PCREDUNDANT, to avoid scattering data from the subcomms back to the original comm, we create
   the subcommunicators round-robin: process [rank] goes into subcommunicator [rank % nsubcomm].
   Example: size=4, nsubcomm=(*psubcomm)->n=3

     comm=(*psubcomm)->parent:
      rank:     [0]  [1]  [2]  [3]
      color:     0    1    2    0

     subcomm=(*psubcomm)->comm:
      subrank:  [0]  [0]  [0]  [1]

     dupcomm=(*psubcomm)->dupparent:
      duprank:  [0]  [2]  [3]  [1]

   Here, subcomm[color = 0] has subsize=2 and owns processes [0] and [3],
         subcomm[color = 1] has subsize=1 and owns process [1],
         subcomm[color = 2] has subsize=1 and owns process [2].
   dupcomm has the same number of processes as comm, and its duprank maps the
   processes of the subcomms contiguously into a 1d array:
      duprank:   [0]  [1]    [2]         [3]
      rank:      [0]  [3]    [1]         [2]
                 subcomm[0]  subcomm[1]  subcomm[2]
*/
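/*
   Equivalently, the loop below computes duprank = subsize[0] + ... + subsize[color-1] + subrank.
   For the example above: rank 3 has color 0 and subrank 1, so duprank = 1;
   rank 1 has color 1 and subrank 0, so duprank = subsize[0] + 0 = 2.
*/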
PetscErrorCode PetscSubcommCreate_interlaced(PetscSubcomm psubcomm)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,*subsize,duprank,subrank;
  PetscInt       np_subcomm,nleftover,i,j,color,nsubcomm=psubcomm->n;
  MPI_Comm       subcomm=MPI_COMM_NULL,dupcomm=MPI_COMM_NULL,comm=psubcomm->parent;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* get the size of each subcommunicator; the first nleftover ones get one extra process */
  ierr = PetscMalloc((1+nsubcomm)*sizeof(PetscMPIInt),&subsize);CHKERRQ(ierr);
  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++) {
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;
  }

  /* find the color and subrank of this process: ranks are dealt out round-robin */
  color   = rank%nsubcomm;
  subrank = rank/nsubcomm;

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  /* duprank = sum of the sizes of the subcommunicators preceding this color, plus subrank */
  j = 0; duprank = 0;
  for (i=0; i<nsubcomm; i++) {
    if (j == color) {
      duprank += subrank;
      break;
    }
    duprank += subsize[i]; j++;
  }
  ierr = PetscFree(subsize);CHKERRQ(ierr);

  /* create dupcomm with the same size as comm; its rank, duprank, maps the subcomms contiguously into dupcomm */
  ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);

  ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(subcomm,&psubcomm->comm,PETSC_NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);

  psubcomm->color = color;
  PetscFunctionReturn(0);
}