Actual source code: mpicsrperm.c

#include <../src/mat/impls/aij/mpi/mpiaij.h>

/*@C
   MatCreateMPIAIJPERM - Creates a sparse parallel matrix whose local
   portions are stored as SEQAIJPERM matrices (a matrix class that inherits
   from SEQAIJ but includes some optimizations to allow more effective
   vectorization).  The same guidelines that apply to MPIAIJ matrices for
   preallocating the matrix storage apply here as well.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
.  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
           This value should be the same as the local size used in creating the
           y vector for the matrix-vector product y = Ax.
.  n - This value should be the same as the local size used in creating the
       x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have
       it calculated if N is given). For square matrices n is almost always m.
.  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
.  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
.  d_nz  - number of nonzeros per row in the DIAGONAL portion of the local submatrix
           (same value is used for all local rows)
.  d_nnz - array containing the number of nonzeros in the various rows of the
           DIAGONAL portion of the local submatrix (possibly different for each row)
           or PETSC_NULL, if d_nz is used to specify the nonzero structure.
           The size of this array is equal to the number of local rows, i.e., 'm'.
           You must leave room for the diagonal entry even if it is zero.
.  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of the local
           submatrix (same value is used for all local rows).
-  o_nnz - array containing the number of nonzeros in the various rows of the
           OFF-DIAGONAL portion of the local submatrix (possibly different for
           each row) or PETSC_NULL, if o_nz is used to specify the nonzero
           structure. The size of this array is equal to the number
           of local rows, i.e., 'm'.

   Output Parameter:
.  A - the matrix

   Notes:
   If the *_nnz parameter is given, then the *_nz parameter is ignored.

   The m, n, M, and N parameters specify the size of the matrix and its partitioning
   across processors, while the d_nz, d_nnz, o_nz, and o_nnz parameters specify the
   approximate storage requirements for this matrix.

   If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
   processor, then it must be used on all processors that share the object for
   that argument.

   The user MUST specify either the local or global matrix dimensions
   (possibly both).

   The parallel matrix is partitioned such that the first m0 rows belong to
   process 0, the next m1 rows belong to process 1, the next m2 rows belong
   to process 2, etc., where m0, m1, m2, ... are the values of the input
   parameter 'm' on each process.

   The DIAGONAL portion of the local submatrix of a processor can be defined
   as the submatrix obtained by extracting the part corresponding to the rows
   r1-r2 and columns r1-r2 of the global matrix, where r1 is the first row
   that belongs to the processor and r2 is the last row belonging to that
   processor. This is a square m x m matrix. The remaining portion of the
   local submatrix (m x N) constitutes the OFF-DIAGONAL portion.
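
   For illustration only, a minimal sketch that is not part of the original man page
   (the local sizes, per-row nonzero counts, and communicator below are assumed values
   chosen just to show how d_nnz and o_nnz are passed; error checking is omitted):

      Mat      A;
      PetscInt d_nnz[3] = {3,2,2};   // assumed nonzeros per local row of the DIAGONAL block
      PetscInt o_nnz[3] = {1,0,1};   // assumed nonzeros per local row of the OFF-DIAGONAL block
      MatCreateMPIAIJPERM(PETSC_COMM_WORLD,3,3,PETSC_DETERMINE,PETSC_DETERMINE,0,d_nnz,0,o_nnz,&A);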

   If o_nnz and d_nnz are specified, then o_nz and d_nz are ignored.

   When calling this routine with a single process communicator, a matrix of
   type SEQAIJPERM is returned.  If a matrix of type MPIAIJPERM is desired
   for this type of communicator, use the construction mechanism:
     MatCreate(...,&A); MatSetType(A,MATMPIAIJPERM); MatMPIAIJSetPreallocation(A,...);

   By default, this format uses inodes (identical nodes) when possible.
   We search for consecutive rows with the same nonzero structure, thereby
   reusing matrix information to achieve increased efficiency.

   Options Database Keys:
+  -mat_no_inode  - Do not use inodes
.  -mat_inode_limit <limit> - Sets inode limit (max limit=5)
-  -mat_aij_oneindex - Internally use indexing starting at 1
        rather than 0.  Note that when calling MatSetValues(),
        the user still MUST index entries starting at 0!

   Level: intermediate

.keywords: matrix, cray, sparse, parallel

.seealso: MatCreate(), MatCreateSeqAIJPERM(), MatSetValues()
@*/
PetscErrorCode  MatCreateMPIAIJPERM(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
{
  PetscMPIInt    size;

  MatCreate(comm,A);
  MatSetSizes(*A,m,n,M,N);
  MPI_Comm_size(comm,&size);
  if (size > 1) {
    /* More than one process: use the parallel MPIAIJPERM format with separate
       preallocation for the diagonal and off-diagonal blocks */
    MatSetType(*A,MATMPIAIJPERM);
    MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);
  } else {
    /* Single-process communicator: fall back to the sequential SEQAIJPERM format */
    MatSetType(*A,MATSEQAIJPERM);
    MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);
  }
  return(0);
}


PetscErrorCode  MatMPIAIJSetPreallocation_MPIAIJPERM(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
  Mat_MPIAIJ     *b = (Mat_MPIAIJ*)B->data;

  /* Preallocate as a standard MPIAIJ matrix, then convert the local diagonal (b->A)
     and off-diagonal (b->B) blocks to SEQAIJPERM in place */
  MatMPIAIJSetPreallocation_MPIAIJ(B,d_nz,d_nnz,o_nz,o_nnz);
  MatConvert_SeqAIJ_SeqAIJPERM(b->A, MATSEQAIJPERM, MAT_REUSE_MATRIX, &b->A);
  MatConvert_SeqAIJ_SeqAIJPERM(b->B, MATSEQAIJPERM, MAT_REUSE_MATRIX, &b->B);
  return(0);
}

PetscErrorCode  MatConvert_MPIAIJ_MPIAIJPERM(Mat A,const MatType type,MatReuse reuse,Mat *newmat)
{
  Mat            B = *newmat;

  if (reuse == MAT_INITIAL_MATRIX) {
    MatDuplicate(A,MAT_COPY_VALUES,&B);
  }

  /* Relabel the matrix as MPIAIJPERM and install the AIJPERM-specific preallocation routine */
  PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJPERM);
  PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPIAIJSetPreallocation_C",
                                    "MatMPIAIJSetPreallocation_MPIAIJPERM",
                                    MatMPIAIJSetPreallocation_MPIAIJPERM);
  *newmat = B;
  return(0);
}


PetscErrorCode  MatCreate_MPIAIJPERM(Mat A)
{
  /* Build the matrix as an ordinary MPIAIJ matrix, then convert it in place to MPIAIJPERM */
  MatSetType(A,MATMPIAIJ);
  MatConvert_MPIAIJ_MPIAIJPERM(A,MATMPIAIJPERM,MAT_REUSE_MATRIX,&A);
  return(0);
}

/*MC
   MATAIJPERM - MATAIJPERM = "AIJPERM" - A matrix type to be used for sparse matrices.

   This matrix type is identical to MATSEQAIJPERM when constructed with a single process communicator,
   and MATMPIAIJPERM otherwise.  As a result, for single process communicators,
   MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
   for communicators controlling multiple processes.  It is recommended that you call both of
   the above preallocation routines for simplicity.
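
   For example, a minimal sketch that is not part of the original source (comm, M, and N
   are assumed to be provided by the caller, the value of 5 nonzeros per row is chosen
   only for illustration, and error checking is omitted):

      Mat A;
      MatCreate(comm,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);
      MatSetType(A,MATAIJPERM);
      MatSeqAIJSetPreallocation(A,5,PETSC_NULL);                // takes effect on a single-process communicator
      MatMPIAIJSetPreallocation(A,5,PETSC_NULL,5,PETSC_NULL);   // takes effect on a multi-process communicator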

   Options Database Keys:
. -mat_type aijperm - sets the matrix type to "AIJPERM" during a call to MatSetFromOptions()

  Level: beginner

.seealso: MatCreateMPIAIJPERM(), MATSEQAIJPERM, MATMPIAIJPERM
M*/