Actual source code: sliced.c
#include <petscdmsliced.h>   /*I "petscdmsliced.h" I*/
#include <petscmat.h>        /*I "petscmat.h"      I*/
#include <private/dmimpl.h>
/* CSR storage of the nonzero structure of a bs*bs matrix */
typedef struct {
  PetscInt bs,nz,*i,*j;
} DMSlicedBlockFills;

typedef struct {
  Vec                 globalvector;
  PetscInt            bs,n,N,Nghosts,*ghosts;
  PetscInt            d_nz,o_nz,*d_nnz,*o_nnz;
  DMSlicedBlockFills *dfill,*ofill;
} DM_Sliced;
PetscErrorCode DMGetMatrix_Sliced(DM dm,const MatType mtype,Mat *J)
{
  PetscErrorCode         ierr;
  PetscInt               *globals,*sd_nnz,*so_nnz,rstart,bs,i;
  ISLocalToGlobalMapping lmap,blmap;
  void                   (*aij)(void) = PETSC_NULL;
  DM_Sliced              *slice = (DM_Sliced*)dm->data;

  bs = slice->bs;
  MatCreate(((PetscObject)dm)->comm,J);
  MatSetSizes(*J,slice->n*bs,slice->n*bs,PETSC_DETERMINE,PETSC_DETERMINE);
  MatSetType(*J,mtype);
  MatSeqBAIJSetPreallocation(*J,bs,slice->d_nz,slice->d_nnz);
  MatMPIBAIJSetPreallocation(*J,bs,slice->d_nz,slice->d_nnz,slice->o_nz,slice->o_nnz);
  /* In general, we have to do extra work to preallocate for scalar (AIJ) matrices, so we check whether it will do any
   * good before going on with it. */
  PetscObjectQueryFunction((PetscObject)*J,"MatMPIAIJSetPreallocation_C",&aij);
  if (!aij) {
    PetscObjectQueryFunction((PetscObject)*J,"MatSeqAIJSetPreallocation_C",&aij);
  }
  if (aij) {
    if (bs == 1) {
      MatSeqAIJSetPreallocation(*J,slice->d_nz,slice->d_nnz);
      MatMPIAIJSetPreallocation(*J,slice->d_nz,slice->d_nnz,slice->o_nz,slice->o_nnz);
    } else if (!slice->d_nnz) {
      MatSeqAIJSetPreallocation(*J,slice->d_nz*bs,PETSC_NULL);
      MatMPIAIJSetPreallocation(*J,slice->d_nz*bs,PETSC_NULL,slice->o_nz*bs,PETSC_NULL);
    } else {
      /* The user has provided preallocation per block-row; convert it to per scalar-row, respecting DMSlicedSetBlockFills() if applicable */
      PetscMalloc2(slice->n*bs,PetscInt,&sd_nnz,(!!slice->o_nnz)*slice->n*bs,PetscInt,&so_nnz);
      for (i=0; i<slice->n*bs; i++) {
        sd_nnz[i] = (slice->d_nnz[i/bs]-1) * (slice->ofill ? slice->ofill->i[i%bs+1]-slice->ofill->i[i%bs] : bs)
                    + (slice->dfill ? slice->dfill->i[i%bs+1]-slice->dfill->i[i%bs] : bs);
        if (so_nnz) {
          so_nnz[i] = slice->o_nnz[i/bs] * (slice->ofill ? slice->ofill->i[i%bs+1]-slice->ofill->i[i%bs] : bs);
        }
      }
      MatSeqAIJSetPreallocation(*J,slice->d_nz*bs,sd_nnz);
      MatMPIAIJSetPreallocation(*J,slice->d_nz*bs,sd_nnz,slice->o_nz*bs,so_nnz);
      PetscFree2(sd_nnz,so_nnz);
    }
  }
  MatSetBlockSize(*J,bs);

  /* Set up the local-to-global map.  For the scalar map, we have to translate to entry-wise indexing instead of block-wise. */
  PetscMalloc((slice->n+slice->Nghosts)*bs*sizeof(PetscInt),&globals);
  MatGetOwnershipRange(*J,&rstart,PETSC_NULL);
  for (i=0; i<slice->n*bs; i++) {
    globals[i] = rstart + i;
  }
  for (i=0; i<slice->Nghosts*bs; i++) {
    globals[slice->n*bs+i] = slice->ghosts[i/bs]*bs + i%bs;
  }
  ISLocalToGlobalMappingCreate(PETSC_COMM_SELF,(slice->n+slice->Nghosts)*bs,globals,PETSC_OWN_POINTER,&lmap);
  ISLocalToGlobalMappingBlock(lmap,bs,&blmap);
  MatSetLocalToGlobalMapping(*J,lmap,lmap);
  MatSetLocalToGlobalMappingBlock(*J,blmap,blmap);
  ISLocalToGlobalMappingDestroy(&lmap);
  ISLocalToGlobalMappingDestroy(&blmap);
  return(0);
}
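/*
   Worked example of the scalar-row preallocation above (illustrative, not part of sliced.c):
   take bs = 3 and a block row with d_nnz[i/bs] = 4 and o_nnz[i/bs] = 2.  For a scalar row
   r = i%bs whose dfill row holds 2 columns (dfill->i[r+1]-dfill->i[r] = 2) and whose ofill row
   holds 1 column, the loop above computes

     sd_nnz[i] = (4 - 1)*1 + 2 = 5    (the off-diagonal blocks of the diagonal portion use ofill,
                                       the diagonal block itself uses dfill)
     so_nnz[i] =       2*1     = 2    (all blocks of the off-diagonal portion use ofill)

   With no block fills set, both ternaries fall back to the dense count bs = 3, giving
   sd_nnz[i] = (4-1)*3 + 3 = 12 and so_nnz[i] = 2*3 = 6.
*/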
/*@C
    DMSlicedSetGhosts - Sets the global indices of other processes' elements that will
      be ghosts on this process

    Not Collective

    Input Parameters:
+    dm - the DM object
.    bs - block size
.    nlocal - number of local (owned, non-ghost) blocks
.    Nghosts - number of ghost blocks on this process
-    ghosts - global indices of each ghost block

    Level: advanced

.seealso DMDestroy(), DMCreateGlobalVector(), DMSlicedGetGlobalIndices()

@*/
PetscErrorCode DMSlicedSetGhosts(DM dm,PetscInt bs,PetscInt nlocal,PetscInt Nghosts,const PetscInt ghosts[])
{
  DM_Sliced *slice = (DM_Sliced*)dm->data;

  PetscFree(slice->ghosts);
  PetscMalloc(Nghosts*sizeof(PetscInt),&slice->ghosts);
  PetscMemcpy(slice->ghosts,ghosts,Nghosts*sizeof(PetscInt));
  slice->bs      = bs;
  slice->n       = nlocal;
  slice->Nghosts = Nghosts;
  return(0);
}
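/*
   Usage sketch (illustrative only, not part of sliced.c): describe a layout of two processes that
   each own 4 blocks of size 2 and ghost one block owned by the neighbour.  The helper name, the
   layout, and the global block indices are invented for the example; error checking is omitted to
   match the listing.
*/
static PetscErrorCode ExampleSetGhosts(DM dm,PetscMPIInt rank)
{
  const PetscInt bs = 2,nlocal = 4,Nghosts = 1;
  PetscInt       ghosts[1];

  ghosts[0] = rank ? 3 : 4;    /* global block index of the neighbour's boundary block */
  DMSlicedSetGhosts(dm,bs,nlocal,Nghosts,ghosts);
  return(0);
}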
/*@C
    DMSlicedSetPreallocation - sets the matrix memory preallocation for matrices computed by DMSliced

    Not Collective

    Input Parameters:
+    dm - the DM object
.    d_nz - number of block nonzeros per block row in the diagonal portion of the local
           submatrix (same for all local rows)
.    d_nnz - array containing the number of block nonzeros in the various block rows
           of the diagonal portion of the local submatrix (possibly different for each block
           row), or PETSC_NULL.  You must leave room for the diagonal entry even if it is zero.
.    o_nz - number of block nonzeros per block row in the off-diagonal portion of the local
           submatrix (same for all local rows).
-    o_nnz - array containing the number of block nonzeros in the various block rows of the
           off-diagonal portion of the local submatrix (possibly different for
           each block row), or PETSC_NULL.

    Notes:
    See MatMPIBAIJSetPreallocation() for more details on preallocation.  If a scalar matrix (AIJ) is
    obtained with DMSlicedGetMatrix(), the correct preallocation will be set, respecting DMSlicedSetBlockFills().

    Level: advanced

.seealso DMDestroy(), DMCreateGlobalVector(), DMSlicedGetGlobalIndices(), MatMPIAIJSetPreallocation(),
         MatMPIBAIJSetPreallocation(), DMSlicedGetMatrix(), DMSlicedSetBlockFills()

@*/
PetscErrorCode DMSlicedSetPreallocation(DM dm,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
  DM_Sliced *slice = (DM_Sliced*)dm->data;

  slice->d_nz  = d_nz;
  slice->d_nnz = (PetscInt*)d_nnz;
  slice->o_nz  = o_nz;
  slice->o_nnz = (PetscInt*)o_nnz;
  return(0);
}
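/*
   Usage sketch (illustrative only, not part of sliced.c): per-block-row preallocation for one
   process of the 4-block layout sketched after DMSlicedSetGhosts() above, assuming a 1D chain in
   which each block couples to itself and its two neighbours.  The counts are invented for the
   example; error checking is omitted to match the listing.
*/
static PetscErrorCode ExampleSetPreallocation(DM dm)
{
  const PetscInt d_nnz[4] = {2,3,3,2};   /* on-process coupled blocks per block row, diagonal block included */
  const PetscInt o_nnz[4] = {0,0,0,1};   /* off-process coupled blocks per block row */

  DMSlicedSetPreallocation(dm,3,d_nnz,1,o_nnz);   /* the scalar counts are used only if the arrays are PETSC_NULL */
  return(0);
}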
static PetscErrorCode DMSlicedSetBlockFills_Private(PetscInt bs,const PetscInt *fill,DMSlicedBlockFills **inf)
{
  PetscErrorCode     ierr;
  PetscInt           i,j,nz,*fi,*fj;
  DMSlicedBlockFills *f;

  if (*inf) {PetscFree3(*inf,(*inf)->i,(*inf)->j);}
  if (!fill) return(0);
  for (i=0,nz=0; i<bs*bs; i++) if (fill[i]) nz++;
  PetscMalloc3(1,DMSlicedBlockFills,&f,bs+1,PetscInt,&fi,nz,PetscInt,&fj);
  f->bs = bs;
  f->nz = nz;
  f->i  = fi;
  f->j  = fj;
  for (i=0,nz=0; i<bs; i++) {
    fi[i] = nz;
    for (j=0; j<bs; j++) if (fill[i*bs+j]) fj[nz++] = j;
  }
  fi[i] = nz;
  *inf  = f;
  return(0);
}
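/*
   Worked example of the fill-to-CSR conversion above (illustrative, not part of sliced.c):
   for bs = 2 and the row-major fill pattern

     fill = {1,1,
             0,1}

   the routine counts nz = 3 and builds f->i = {0,2,3}, f->j = {0,1,1}: row 0 keeps columns 0
   and 1, row 1 keeps only column 1.
*/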
/*@C
    DMSlicedSetBlockFills - Sets the fill pattern in each block for a multi-component problem
    of the matrix returned by DMSlicedGetMatrix().

    Logically Collective on DM

    Input Parameters:
+    dm - the DM object
.    dfill - the fill pattern in the diagonal block (may be PETSC_NULL, meaning a dense block)
-    ofill - the fill pattern in the off-diagonal blocks

    Notes:
    This only makes sense for multicomponent problems using scalar matrix formats (AIJ).
    See DMDASetBlockFills() for example usage.

    Level: advanced

.seealso DMSlicedGetMatrix(), DMDASetBlockFills()
@*/
PetscErrorCode DMSlicedSetBlockFills(DM dm,const PetscInt *dfill,const PetscInt *ofill)
{
  DM_Sliced *slice = (DM_Sliced*)dm->data;

  DMSlicedSetBlockFills_Private(slice->bs,dfill,&slice->dfill);
  DMSlicedSetBlockFills_Private(slice->bs,ofill,&slice->ofill);
  return(0);
}
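/*
   Usage sketch (illustrative only, not part of sliced.c): fill patterns for a hypothetical
   2-component problem in which both components couple within a block, but only the first
   component couples to neighbouring blocks.  The arrays are row-major bs*bs with nonzero meaning
   "allocate this entry".  Since DMSlicedSetBlockFills() reads slice->bs, call it after
   DMSlicedSetGhosts(); error checking is omitted to match the listing.
*/
static PetscErrorCode ExampleSetBlockFills(DM dm)
{
  const PetscInt dfill[4] = {1,1,
                             1,1};
  const PetscInt ofill[4] = {1,0,
                             0,0};

  DMSlicedSetBlockFills(dm,dfill,ofill);
  return(0);
}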
PetscErrorCode DMDestroy_Sliced(DM dm)
{
  DM_Sliced *slice = (DM_Sliced*)dm->data;

  VecDestroy(&slice->globalvector);
  PetscFree(slice->ghosts);
  if (slice->dfill) {PetscFree3(slice->dfill,slice->dfill->i,slice->dfill->j);}
  if (slice->ofill) {PetscFree3(slice->ofill,slice->ofill->i,slice->ofill->j);}
  return(0);
}
PetscErrorCode DMCreateGlobalVector_Sliced(DM dm,Vec *gvec)
{
  PetscErrorCode ierr;
  PetscInt       bs,cnt;
  DM_Sliced      *slice = (DM_Sliced*)dm->data;

  *gvec = 0;
  if (slice->globalvector) {
    PetscObjectGetReference((PetscObject)slice->globalvector,&cnt);
    if (cnt == 1) {             /* Nobody else has a reference so we can just reference it and give it away */
      *gvec = slice->globalvector;
      PetscObjectReference((PetscObject)*gvec);
      VecZeroEntries(*gvec);
    } else {                    /* Someone else has a reference so we duplicate the global vector */
      VecDuplicate(slice->globalvector,gvec);
    }
  } else {
    bs = slice->bs;
    VecCreateGhostBlock(((PetscObject)dm)->comm,bs,slice->n*bs,PETSC_DETERMINE,slice->Nghosts,slice->ghosts,&slice->globalvector);
    *gvec = slice->globalvector;
    PetscObjectReference((PetscObject)*gvec);
  }
  return(0);
}
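/*
   Sketch of the caching behaviour above (illustrative only, not part of sliced.c): while the
   caller still holds the first vector, a second request is served by VecDuplicate(); once the
   cached vector is no longer referenced elsewhere, it is zeroed and handed out again.  The helper
   name is invented; error checking is omitted to match the listing.
*/
static PetscErrorCode ExampleVectorCaching(DM dm)
{
  Vec X,Y;

  DMCreateGlobalVector(dm,&X);   /* creates and caches the ghosted global vector */
  DMCreateGlobalVector(dm,&Y);   /* the cache is still referenced by X, so Y is a duplicate */
  VecDestroy(&Y);
  VecDestroy(&X);
  DMCreateGlobalVector(dm,&X);   /* the cache reference count is back to 1, so the cached vector is reused */
  VecDestroy(&X);
  return(0);
}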
PetscErrorCode DMCreate_Sliced(DM p)
{
  DM_Sliced *slice;

  PetscNewLog(p,DM_Sliced,&slice);
  p->data = slice;

  PetscObjectChangeTypeName((PetscObject)p,"Sliced");

  p->ops->createglobalvector = DMCreateGlobalVector_Sliced;
  p->ops->getmatrix          = DMGetMatrix_Sliced;
  p->ops->destroy            = DMDestroy_Sliced;
  return(0);
}
/*@C
    DMSlicedCreate - Creates a DM object, used to manage data for an unstructured problem

    Collective on MPI_Comm

    Input Parameter:
.   comm - the processors that will share the global vector

    Output Parameter:
.   dm - the DM object

    Level: advanced

.seealso DMDestroy(), DMCreateGlobalVector(), DMSlicedGetGlobalIndices()

@*/
PetscErrorCode DMSlicedCreate(MPI_Comm comm,DM *dm)
{
  DMCreate(comm,dm);
  DMSetType(*dm,DMSLICED);
  return(0);
}
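/*
   End-to-end sketch (illustrative only, not part of sliced.c): create a DMSliced, describe its
   blocks, ghosts, and preallocation, then obtain the global vector and matrix through the generic
   DM interface, which dispatches to the _Sliced routines above.  The DMGetMatrix()/DMDestroy()
   calling sequences assume the same PETSc release as the rest of this file; sizes and indices are
   invented and error checking is omitted to match the listing.
*/
static PetscErrorCode ExampleLifecycle(MPI_Comm comm)
{
  DM             dm;
  Vec            X;
  Mat            A;
  const PetscInt bs = 2,nlocal = 4,Nghosts = 1,ghosts[1] = {4};
  const PetscInt d_nnz[4] = {2,3,3,2},o_nnz[4] = {0,0,0,1};

  DMSlicedCreate(comm,&dm);
  DMSlicedSetGhosts(dm,bs,nlocal,Nghosts,ghosts);
  DMSlicedSetPreallocation(dm,3,d_nnz,1,o_nnz);
  DMCreateGlobalVector(dm,&X);
  DMGetMatrix(dm,MATAIJ,&A);
  /* ... assemble A and X, solve, etc. ... */
  MatDestroy(&A);
  VecDestroy(&X);
  DMDestroy(&dm);
  return(0);
}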
/*@C
    DMSlicedGetGlobalIndices - Gets the global indices for all the local entries

    Collective on DM

    Input Parameter:
.    dm - the DM object

    Output Parameter:
.    idx - the global index of each local entry

    Level: advanced

    Notes:
    The idx array should be freed by the calling routine with PetscFree()

.seealso DMSlicedDestroy(), DMSlicedCreateGlobalVector(), DMSlicedCreate()

@*/
PetscErrorCode DMSlicedGetGlobalIndices(DM dm,PetscInt *idx[])
{
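  /* Note: as listed here, this routine is a stub; it returns without setting idx. */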
  return(0);
}
/* Explanation of the missing functions for DMDA-style handling of the local vector:

     DMSlicedCreateLocalVector()
     DMSlicedGlobalToLocalBegin()
     DMSlicedGlobalToLocalEnd()

   There is no way to get the global form from a local form, so DMSlicedCreateLocalVector() would be a memory leak without
   external accounting for the global vector.  Also, DMSliced intends the user to work with the VecGhost interface, since the
   ghosts are already ordered after the owned entries.  Contrast this with DMDA, where the local vector has a special
   ordering described by the structured grid, hence it cannot share memory with the global form.  For this reason, users
   of DMSliced should work with the global vector and use

     VecGhostGetLocalForm(), VecGhostRestoreLocalForm()
     VecGhostUpdateBegin(), VecGhostUpdateEnd()

   rather than the missing DMDA-style functions.  This is conceptually simpler and offers better performance than is
   possible with the DMDA-style interface.
*/
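/*
   Sketch of that recommended workflow (illustrative only, not part of sliced.c): the global vector
   produced by DMCreateGlobalVector_Sliced() is created with VecCreateGhostBlock(), so its local
   form exposes the owned entries followed by the ghost entries.  A typical loop writes into both,
   accumulates ghost contributions onto their owners, then refreshes the ghost values.  The helper
   name is invented and error checking is omitted to match the listing.
*/
static PetscErrorCode ExampleGhostUpdate(DM dm)
{
  Vec          X,Xlocal;
  PetscScalar *x;
  PetscInt     i,nloc;

  DMCreateGlobalVector(dm,&X);
  VecGhostGetLocalForm(X,&Xlocal);        /* owned entries first, then ghosts */
  VecGetLocalSize(Xlocal,&nloc);          /* (n + Nghosts)*bs entries */
  VecGetArray(Xlocal,&x);
  for (i=0; i<nloc; i++) x[i] = 1.0;      /* write owned and ghost entries */
  VecRestoreArray(Xlocal,&x);
  VecGhostRestoreLocalForm(X,&Xlocal);

  VecGhostUpdateBegin(X,ADD_VALUES,SCATTER_REVERSE);     /* add ghost contributions to their owners */
  VecGhostUpdateEnd(X,ADD_VALUES,SCATTER_REVERSE);
  VecGhostUpdateBegin(X,INSERT_VALUES,SCATTER_FORWARD);  /* refresh ghost values from the owners */
  VecGhostUpdateEnd(X,INSERT_VALUES,SCATTER_FORWARD);

  VecDestroy(&X);
  return(0);
}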