Actual source code: mpi.h
/*
   This is a special set of bindings for uni-processor use of MPI by the PETSc library.

   NOT ALL THE MPI CALLS ARE IMPLEMENTED CORRECTLY! Only those needed in PETSc.

   For example,
   * Does not implement send to self.
   * Does not implement attributes correctly.
*/
/*
   The following information is a response to one of the petsc-maint
   questions regarding MPIUNI.

   MPIUNI was developed with the aim of getting PETSc compiled and usable
   in the absence of a full MPI implementation. With it, we were able to
   provide PETSc on Windows and Windows64 even before any MPI implementation
   was available on those platforms. [Or with certain compilers - like
   Borland - that did not have a usable MPI implementation.]

   However, providing a sequential, standards-compliant MPI implementation
   is *not* the goal of MPIUNI. The development strategy was to make just
   enough changes so that the PETSc sources and examples compile without
   errors and run in uni-processor mode. This is the reason the individual
   functions are not documented.

   PETSc usage of MPIUNI is primarily from C. However, a minimal Fortran
   interface is also provided - to get PETSc Fortran examples with a
   few MPI calls working.

   One of the optimizations in MPIUNI is to avoid function-call
   overhead when possible. Hence most of the C functions are
   implemented as macros. With Fortran usage, however, the function
   calls cannot be avoided.
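
   As an illustration, with the macros defined below a C call such as

       ierr = MPI_Wait(&request,&status);

   compiles to a comma expression that merely assigns the arguments to
   MPIUNI_TMP (to silence unused-variable warnings) and evaluates to
   MPI_SUCCESS - no function call is generated at all.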

   Most PETSc objects have separate sequential and parallel
   implementations. For example, we have two sparse matrix storage
   formats - SeqAIJ and MPIAIJ. Some MPI routines are used in the Seq
   parts, but most of them are used in the MPI parts; the send/receive
   calls are found mostly in the MPI parts.

   When MPIUNI is used, only the Seq versions of the PETSc objects are
   used, even though the MPI variants of the objects are also compiled.
   Since there are no send/receive calls in the Seq variants, PETSc
   works fine with MPIUNI in sequential mode.

   The reason some send/receive functions are defined to abort() is to
   detect sections of code that use send/receive functions and get
   executed in sequential mode (which should not happen in the case of
   PETSc).

   A proper implementation of send/receive would involve writing a
   function for each of them. Inside each of these functions we would
   have to check whether the send is to self or the receive is from
   self, and then do the buffering accordingly (until the receive is
   called) - or, if a nonblocking receive was posted, do a copy, etc.
   Handling the buffering aspects might be complicated enough that a
   proper implementation of MPI might as well be used instead. This is
   the reason send-to-self is not implemented in MPIUNI, and never
   will be.
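
   A rough sketch (purely hypothetical - MPIUNI deliberately does not
   do this) of what a send-to-self MPI_Send/MPI_Recv pair would need:

       MPI_Send: append {copy of buf, count, datatype, tag} to a
                 pending-message queue for the communicator
       MPI_Recv: search the queue for a matching {source, tag} entry,
                 memcpy the stored data into the receive buffer, and
                 free the entry

   plus the corresponding bookkeeping for the nonblocking and
   persistent variants - essentially the message-matching machinery
   of a real MPI implementation.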

   Proper implementations of MPI [e.g. MPICH and OpenMPI] are
   available for most machines. When these packages are available, it
   is generally preferable to use one of them instead of MPIUNI - even
   if the user is running PETSc sequentially.

   - MPIUNI does not support all MPI functions [or functionality].
     Hence it might not work with external packages or user code that
     has MPI calls in it.

   - MPIUNI is not a standards-compliant implementation for np=1.
     For example, if the user code has a send/recv to self, it will
     abort. [There are similar issues with a number of other MPI
     features.] MPICH and OpenMPI, however, are correct implementations
     of the MPI standard for np=1.

   - When user code uses multiple MPI-based packages that have their
     own *internal* stubs equivalent to MPIUNI, these multiple
     implementations of MPI for np=1 invariably conflict with each
     other in sequential mode. The correct thing to do is to make all
     such packages use the *same* MPI implementation for np=1.
     MPICH/OpenMPI satisfy this requirement correctly [and hence are
     the correct choice].

   - Using MPICH/OpenMPI sequentially should have minimal
     disadvantages. [For example, these binaries can be run without
     mpirun/mpiexec as ./executable, without requiring any extra
     configuration for ssh/rsh/daemons etc.] This should not be a
     reason to avoid these packages for sequential use.

   Instructions for building standalone MPIUNI [e.g. linux/gcc+gfortran]:
   - extract include/mpiuni/mpi.h, mpif.h, and src/sys/mpiuni/mpi.c from PETSc
   - remove the reference to petscconf.h from mpi.h
   - gcc -c mpi.c -DPETSC_HAVE_STDLIB_H -DPETSC_HAVE_FORTRAN_UNDERSCORE
   - ar cr libmpiuni.a mpi.o
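
   A minimal sanity check of the resulting library (test.c below is a
   hypothetical example, assuming mpi.h and libmpiuni.a are in the
   current directory; with MPIUNI, MPI_Comm_size() always reports a
   size of 1):

       #include "mpi.h"
       int main(int argc,char **argv)
       {
         int size;
         MPI_Init(&argc,&argv);
         MPI_Comm_size(MPI_COMM_WORLD,&size);
         MPI_Finalize();
         return 0;
       }

   compiled and linked as:

       gcc test.c -I. -L. -lmpiuni -o test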
*/
#if !defined(__MPIUNI_H)
#define __MPIUNI_H

/* Required by abort() in mpi.c & for win64 */
#include "petscconf.h"
#if defined(__cplusplus)
extern "C" {
#endif
/* We require an integer type large enough to hold a pointer */
#if !defined(MPIUNI_INTPTR)
#define MPIUNI_INTPTR long
#endif
/*
   MPIUNI_TMP is used in the macros below only to stop various C/C++ compilers
   from generating warning messages about unused variables while compiling PETSc.
*/
extern void *MPIUNI_TMP;
#define MPI_COMM_WORLD       1
#define MPI_COMM_SELF        MPI_COMM_WORLD
#define MPI_COMM_NULL        0
#define MPI_SUCCESS          0
#define MPI_IDENT            0
#define MPI_CONGRUENT        1
#define MPI_SIMILAR          2
#define MPI_UNEQUAL          3
#define MPI_ANY_SOURCE     (-2)
#define MPI_KEYVAL_INVALID   0
#define MPI_ERR_UNKNOWN     18
#define MPI_ERR_INTERN      21
#define MPI_ERR_OTHER        1
#define MPI_TAG_UB           0
#define MPI_ERRORS_RETURN    0
#define MPI_UNDEFINED      (-32766)

/* External types */
typedef int    MPI_Comm;
typedef void   *MPI_Request;
typedef void   *MPI_Group;
typedef struct {int MPI_TAG,MPI_SOURCE,MPI_ERROR;} MPI_Status;
typedef char   *MPI_Errhandler;
typedef int    MPI_Fint;
typedef int    MPI_File;
typedef int    MPI_Info;
typedef int    MPI_Offset;
/* In order to handle datatypes, we make them into "sizeof(raw-type)";
   this allows us to do the MPIUNI_Memcpy() calls easily */
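/* For example, because MPI_DOUBLE is defined as sizeof(double) below, a
   collective such as MPI_Allreduce(sendbuf,recvbuf,n,MPI_DOUBLE,MPI_SUM,comm)
   reduces (for the single process) to
   MPIUNI_Memcpy(recvbuf,sendbuf,n*sizeof(double)). */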
#define MPI_Datatype         int
#define MPI_FLOAT            sizeof(float)
#define MPI_DOUBLE           sizeof(double)
#define MPI_LONG_DOUBLE      sizeof(long double)
#define MPI_CHAR             sizeof(char)
#define MPI_BYTE             sizeof(char)
#define MPI_INT              sizeof(int)
#define MPI_LONG             sizeof(long)
#define MPI_LONG_LONG_INT    sizeof(long long)
#define MPI_SHORT            sizeof(short)
#define MPI_UNSIGNED_SHORT   sizeof(unsigned short)
#define MPI_UNSIGNED         sizeof(unsigned)
#define MPI_UNSIGNED_CHAR    sizeof(unsigned char)
#define MPI_UNSIGNED_LONG    sizeof(unsigned long)
#define MPI_COMPLEX          (2*sizeof(float))
#define MPI_C_COMPLEX        (2*sizeof(float))
#define MPI_C_DOUBLE_COMPLEX (2*sizeof(double))
#define MPI_FLOAT_INT        (sizeof(float) + sizeof(int))
#define MPI_DOUBLE_INT       (sizeof(double) + sizeof(int))
#define MPI_LONG_INT         (sizeof(long) + sizeof(int))
#define MPI_SHORT_INT        (sizeof(short) + sizeof(int))
#define MPI_2INT             (2*sizeof(int))

#if defined(PETSC_USE_REAL___FLOAT128)
extern MPI_Datatype MPIU___FLOAT128;
#define MPI_sizeof(datatype) (((datatype) == MPIU___FLOAT128) ? 2*sizeof(double) : (datatype))
#else
#define MPI_sizeof(datatype) (datatype)
#endif
#define MPI_REQUEST_NULL     ((MPI_Request)0)
#define MPI_GROUP_NULL       ((MPI_Group)0)
#define MPI_INFO_NULL        ((MPI_Info)0)
#define MPI_BOTTOM           ((void *)0)
typedef int MPI_Op;

#define MPI_MODE_RDONLY      0
#define MPI_MODE_WRONLY      0
#define MPI_MODE_CREATE      0

#define MPI_SUM              0
#define MPI_MAX              0
#define MPI_MIN              0
#define MPI_ANY_TAG        (-1)
#define MPI_DATATYPE_NULL    0
#define MPI_PACKED           0
#define MPI_MAX_ERROR_STRING 2056
#define MPI_STATUS_IGNORE    ((MPI_Status *)1)
#define MPI_ORDER_FORTRAN    57
#define MPI_IN_PLACE         ((void *)-1)
/*
   Prototypes of some functions which are implemented in mpi.c
*/
typedef int  (MPI_Copy_function)(MPI_Comm,int,void *,void *,void *,int *);
typedef int  (MPI_Delete_function)(MPI_Comm,int,void *,void *);
typedef void (MPI_User_function)(void *,void *,int *,MPI_Datatype *);
/*
   In order that PETSc's MPIUNI can be used with another package that has its
   own MPIUni, we map the following function names to unique PETSc names. These
   functions are defined in mpi.c and put into the libpetscsys.a or libpetsc.a
   library.

   Note that this does not work for the MPIUni Fortran symbols, which are
   explicitly in the PETSc libraries, unless the flag MPIUNI_AVOID_MPI_NAMESPACE
   is set.
*/
#define MPI_Abort         Petsc_MPI_Abort
#define MPI_Attr_get      Petsc_MPI_Attr_get
#define MPI_Keyval_free   Petsc_MPI_Keyval_free
#define MPI_Attr_put      Petsc_MPI_Attr_put
#define MPI_Attr_delete   Petsc_MPI_Attr_delete
#define MPI_Keyval_create Petsc_MPI_Keyval_create
#define MPI_Comm_free     Petsc_MPI_Comm_free
#define MPI_Comm_dup      Petsc_MPI_Comm_dup
#define MPI_Comm_create   Petsc_MPI_Comm_create
#define MPI_Init          Petsc_MPI_Init
#define MPI_Finalize      Petsc_MPI_Finalize
#define MPI_Initialized   Petsc_MPI_Initialized
#define MPI_Finalized     Petsc_MPI_Finalized
#define MPI_Comm_size     Petsc_MPI_Comm_size
#define MPI_Comm_rank     Petsc_MPI_Comm_rank
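
/* With the renames above, a call to MPI_Comm_rank() in user code compiles to
   Petsc_MPI_Comm_rank(), so it cannot collide with an MPI_Comm_rank() stub
   exported by another package's internal np=1 MPI. */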
/* Prototypes (standard MPI signatures) for the renamed routines above,
   plus MPIUNI_Memcpy(); all are implemented in mpi.c */
extern int MPI_Abort(MPI_Comm,int);
extern int MPI_Attr_get(MPI_Comm,int,void *,int *);
extern int MPI_Keyval_free(int *);
extern int MPI_Attr_put(MPI_Comm,int,void *);
extern int MPI_Attr_delete(MPI_Comm,int);
extern int MPI_Keyval_create(MPI_Copy_function *,MPI_Delete_function *,int *,void *);
extern int MPI_Comm_free(MPI_Comm *);
extern int MPI_Comm_dup(MPI_Comm,MPI_Comm *);
extern int MPI_Comm_create(MPI_Comm,MPI_Group,MPI_Comm *);
extern int MPI_Init(int *,char ***);
extern int MPI_Finalize(void);
extern int MPI_Initialized(int *);
extern int MPI_Finalized(int *);
extern int MPI_Comm_size(MPI_Comm,int *);
extern int MPI_Comm_rank(MPI_Comm,int *);
extern int MPIUNI_Memcpy(void *,const void *,int);

#define MPI_Aint MPIUNI_INTPTR
/*
   Routines we have replaced with macros that do nothing.
   Some return error codes, others return success.
*/
#define MPI_Comm_f2c(comm) (MPI_Comm)(comm)
#define MPI_Comm_c2f(comm) (MPI_Fint)(comm)

#define MPI_Send(buf,count,datatype,dest,tag,comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Recv(buf,count,datatype,source,tag,comm,status) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Get_count(status,datatype,count) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Bsend(buf,count,datatype,dest,tag,comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Ssend(buf,count,datatype,dest,tag,comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Rsend(buf,count,datatype,dest,tag,comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Buffer_attach(buffer,size) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (size),\
      MPI_SUCCESS)
#define MPI_Buffer_detach(buffer,size) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (size),\
      MPI_SUCCESS)
#define MPI_Ibsend(buf,count,datatype,dest,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Issend(buf,count,datatype,dest,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Irsend(buf,count,datatype,dest,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Irecv(buf,count,datatype,source,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Isend(buf,count,datatype,dest,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Wait(request,status) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
      MPI_SUCCESS)
#define MPI_Test(request,flag,status) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
      *(flag) = 0,\
      MPI_SUCCESS)
#define MPI_Request_free(request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPI_SUCCESS)
#define MPI_Waitany(a,b,c,d) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (a),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (b),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (c),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (d),\
      (*(c) = 0),\
      MPI_SUCCESS)
#define MPI_Testany(a,b,c,d,e) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (a),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (b),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (c),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (d),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (e),\
      MPI_SUCCESS)
#define MPI_Waitall(count,array_of_requests,array_of_statuses) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
      MPI_SUCCESS)
#define MPI_Testall(count,array_of_requests,flag,array_of_statuses) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (flag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
      MPI_SUCCESS)
#define MPI_Waitsome(incount,array_of_requests,outcount,\
                     array_of_indices,array_of_statuses) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (incount),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (outcount),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_indices),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
      MPI_SUCCESS)
#define MPI_Comm_group(comm,group) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
      MPI_SUCCESS)
#define MPI_Group_incl(group,n,ranks,newgroup) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (n),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (ranks),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (newgroup),\
      MPI_SUCCESS)
#define MPI_Testsome(incount,array_of_requests,outcount,\
                     array_of_indices,array_of_statuses) MPI_SUCCESS
#define MPI_Iprobe(source,tag,comm,flag,status) (*(flag)=0,MPI_SUCCESS)
#define MPI_Probe(source,tag,comm,status) MPI_SUCCESS
#define MPI_Cancel(request) (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),MPI_SUCCESS)
#define MPI_Test_cancelled(status,flag) (*(flag)=0,MPI_SUCCESS)
#define MPI_Send_init(buf,count,datatype,dest,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPI_SUCCESS)
#define MPI_Bsend_init(buf,count,datatype,dest,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPI_SUCCESS)
#define MPI_Ssend_init(buf,count,datatype,dest,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPI_SUCCESS)
#define MPI_Rsend_init(buf,count,datatype,dest,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPI_SUCCESS)
#define MPI_Recv_init(buf,count,datatype,source,tag,comm,request) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
      MPI_SUCCESS)
#define MPI_Start(request) (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),MPI_SUCCESS)
#define MPI_Startall(count,array_of_requests) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
      MPI_SUCCESS)
#define MPI_Op_create(function,commute,op) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (function),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (commute),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (op),\
      MPI_SUCCESS)
#define MPI_Op_free(op) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (op),\
      MPI_SUCCESS)
/* We need to determine the size of "sendtype" in bytes */
#define MPI_Sendrecv(sendbuf,sendcount,sendtype,\
                     dest,sendtag,recvbuf,recvcount,\
                     recvtype,source,recvtag,\
                     comm,status) \
     (MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount) * MPI_sizeof(sendtype)),MPI_SUCCESS)
#define MPI_Sendrecv_replace(buf,count,datatype,dest,sendtag,\
                             source,recvtag,comm,status) MPI_SUCCESS
#define MPI_Type_contiguous(count,oldtype,newtype) \
     (*(newtype) = (count)*(oldtype),MPI_SUCCESS)
#define MPI_Type_vector(count,blocklength,stride,oldtype,newtype) MPI_SUCCESS
#define MPI_Type_hvector(count,blocklength,stride,oldtype,newtype) MPI_SUCCESS
#define MPI_Type_indexed(count,array_of_blocklengths,\
                         array_of_displacements,oldtype,\
                         newtype) MPI_SUCCESS
#define MPI_Type_hindexed(count,array_of_blocklengths,\
                          array_of_displacements,oldtype,\
                          newtype) MPI_SUCCESS
#define MPI_Type_struct(count,array_of_blocklengths,\
                        array_of_displacements,\
                        array_of_types,newtype) MPI_SUCCESS
#define MPI_Address(location,address) \
     (*(address) = (MPIUNI_INTPTR)(char *)(location),MPI_SUCCESS)
#define MPI_Type_extent(datatype,extent) (*(extent) = (datatype),MPI_SUCCESS)
#define MPI_Type_size(datatype,size) (*(size) = (datatype),MPI_SUCCESS)
#define MPI_Type_lb(datatype,displacement) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Type_ub(datatype,displacement) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Type_commit(datatype) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPI_SUCCESS)
#define MPI_Type_free(datatype) MPI_SUCCESS
#define MPI_Get_elements(status,datatype,count) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Pack(inbuf,incount,datatype,outbuf,\
                 outsize,position,comm) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Unpack(inbuf,insize,position,outbuf,\
                   outcount,datatype,comm) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Pack_size(incount,datatype,comm,size) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Barrier(comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPI_SUCCESS)
#define MPI_Bcast(buffer,count,datatype,root,comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPI_SUCCESS)
#define MPI_Gather(sendbuf,sendcount,sendtype,\
                   recvbuf,recvcount,recvtype,\
                   root,comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)),\
      MPI_SUCCESS)
#define MPI_Gatherv(sendbuf,sendcount,sendtype,\
                    recvbuf,recvcounts,displs,\
                    recvtype,root,comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcounts),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)),\
      MPI_SUCCESS)
#define MPI_Scatter(sendbuf,sendcount,sendtype,\
                    recvbuf,recvcount,recvtype,\
                    root,comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendbuf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendcount),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendtype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvbuf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Scatterv(sendbuf,sendcounts,displs,\
                     sendtype,recvbuf,recvcount,\
                     recvtype,root,comm) \
     (MPIUNI_Memcpy(recvbuf,sendbuf,(recvcount)*MPI_sizeof(recvtype)),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendtype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendcounts),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPI_SUCCESS)
#define MPI_Allgather(sendbuf,sendcount,sendtype,\
                      recvbuf,recvcount,recvtype,comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)),\
      MPI_SUCCESS)
#define MPI_Allgatherv(sendbuf,sendcount,sendtype,\
                       recvbuf,recvcounts,displs,recvtype,comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcounts),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      (sendbuf != MPI_IN_PLACE) ? MPIUNI_Memcpy((recvbuf),(sendbuf),(sendcount)*MPI_sizeof(sendtype)) : 0,\
      MPI_SUCCESS)
#define MPI_Alltoall(sendbuf,sendcount,sendtype,\
                     recvbuf,recvcount,recvtype,comm) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)),\
      MPI_SUCCESS)
#define MPI_Alltoallv(sendbuf,sendcounts,sdispls,\
                      sendtype,recvbuf,recvcounts,\
                      rdispls,recvtype,comm) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Alltoallw(sendbuf,sendcounts,sdispls,\
                      sendtypes,recvbuf,recvcounts,\
                      rdispls,recvtypes,comm) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Reduce(sendbuf,recvbuf,count,\
                   datatype,op,root,comm) \
     (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
#define MPI_Allreduce(sendbuf,recvbuf,count,datatype,op,comm) \
     ((sendbuf != MPI_IN_PLACE) ? MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)) : 0,\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
#define MPI_Scan(sendbuf,recvbuf,count,datatype,op,comm) \
     (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
#define MPI_Exscan(sendbuf,recvbuf,count,datatype,op,comm) MPI_SUCCESS
#define MPI_Reduce_scatter(sendbuf,recvbuf,recvcounts,\
                           datatype,op,comm) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Group_size(group,size) (*(size)=1,MPI_SUCCESS)
#define MPI_Group_rank(group,rank) (*(rank)=0,MPI_SUCCESS)
#define MPI_Group_translate_ranks(group1,n,ranks1,\
                                  group2,ranks2) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Group_compare(group1,group2,result) \
     (*(result)=1,MPI_SUCCESS)
#define MPI_Group_union(group1,group2,newgroup) MPI_SUCCESS
#define MPI_Group_intersection(group1,group2,newgroup) MPI_SUCCESS
#define MPI_Group_difference(group1,group2,newgroup) MPI_SUCCESS
#define MPI_Group_excl(group,n,ranks,newgroup) MPI_SUCCESS
#define MPI_Group_range_incl(group,n,ranges,newgroup) MPI_SUCCESS
#define MPI_Group_range_excl(group,n,ranges,newgroup) MPI_SUCCESS
#define MPI_Group_free(group) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
      MPI_SUCCESS)
#define MPI_Comm_compare(comm1,comm2,result) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm1),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm2),\
      *(result)=MPI_IDENT,\
      MPI_SUCCESS)
#define MPI_Comm_split(comm,color,key,newcomm) MPI_Comm_dup(comm,newcomm)
#define MPI_Comm_test_inter(comm,flag) (*(flag)=1,MPI_SUCCESS)
#define MPI_Comm_remote_size(comm,size) (*(size)=1,MPI_SUCCESS)
#define MPI_Comm_remote_group(comm,group) MPI_SUCCESS
#define MPI_Intercomm_create(local_comm,local_leader,peer_comm,\
                             remote_leader,tag,newintercomm) MPI_SUCCESS
#define MPI_Intercomm_merge(intercomm,high,newintracomm) MPI_SUCCESS
#define MPI_Topo_test(comm,status) MPI_SUCCESS
#define MPI_Cart_create(comm_old,ndims,dims,periods,\
                        reorder,comm_cart) MPI_SUCCESS
#define MPI_Dims_create(nnodes,ndims,dims) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_create(comm,a,b,c,d,e) MPI_SUCCESS
#define MPI_Graphdims_get(comm,nnodes,nedges) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_get(comm,a,b,c,d) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cartdim_get(comm,ndims) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_get(comm,maxdims,dims,periods,coords) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_rank(comm,coords,rank) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_coords(comm,rank,maxdims,coords) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_neighbors_count(comm,rank,nneighbors) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_neighbors(comm,rank,maxneighbors,neighbors) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_shift(comm,direction,disp,rank_source,rank_dest) \
     MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_sub(comm,remain_dims,newcomm) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_map(comm,ndims,dims,periods,newrank) MPI_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_map(comm,a,b,c,d) MPI_Abort(MPI_COMM_WORLD,0)
/* "localhost" is 9 characters; copy 10 bytes to include the terminating NUL */
#define MPI_Get_processor_name(name,result_len) \
     (MPIUNI_Memcpy(name,"localhost",10*sizeof(char)),*(result_len) = 9,MPI_SUCCESS)
#define MPI_Errhandler_create(function,errhandler) (*(errhandler) = (MPI_Errhandler) 0,MPI_SUCCESS)
#define MPI_Errhandler_set(comm,errhandler) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (errhandler),\
      MPI_SUCCESS)
#define MPI_Errhandler_get(comm,errhandler) MPI_SUCCESS
#define MPI_Errhandler_free(errhandler) MPI_SUCCESS
#define MPI_Error_string(errorcode,string,result_len) MPI_SUCCESS
#define MPI_Error_class(errorcode,errorclass) MPI_SUCCESS
#define MPI_Wtick() 1.0
#define MPI_Wtime() 0.0
#define MPI_Pcontrol(level) MPI_SUCCESS

#define MPI_NULL_COPY_FN   0
#define MPI_NULL_DELETE_FN 0
/* MPI-IO additions */

#define MPI_File_open(comm,filename,amode,info,mpi_fh) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (filename),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (amode),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (info),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh),\
      MPI_Abort(MPI_COMM_WORLD,0))

#define MPI_File_close(mpi_fh) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh),\
      MPI_Abort(MPI_COMM_WORLD,0))

#define MPI_File_set_view(mpi_fh,disp,etype,filetype,datarep,info) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (disp),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (etype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (filetype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datarep),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (info),\
      MPI_Abort(MPI_COMM_WORLD,0))
#define MPI_Type_get_extent(datatype,lb,extent) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      *(lb) = 0,*(extent) = (datatype),MPI_SUCCESS)
#define MPI_File_write_all(mpi_fh,buf,count,datatype,status) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
      MPI_Abort(MPI_COMM_WORLD,0))

#define MPI_File_read_all(mpi_fh,buf,count,datatype,status) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
      MPI_Abort(MPI_COMM_WORLD,0))
/* called from PetscInitialize() - so return success */
#define MPI_Register_datarep(name,read_conv_fn,write_conv_fn,extent_fn,state) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (name),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (read_conv_fn),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (write_conv_fn),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (extent_fn),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (state),\
      MPI_SUCCESS)

#define MPI_Type_create_subarray(ndims,array_of_sizes,array_of_subsizes,array_of_starts,order,oldtype,newtype) \
     (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (ndims),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_sizes),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_subsizes),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_starts),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (order),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (oldtype),\
      MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (newtype),\
      MPI_Abort(MPI_COMM_WORLD,0))

#if defined(__cplusplus)
}
#endif
#endif