Actual source code: mpi.c
1: /*
2: This provides a few of the MPI-uni functions that cannot be implemented
3: with C macros
4: */
5: #include <mpiuni/mpi.h>
6: #if !defined(__MPIUNI_H)
7: #error "Wrong mpi.h included! require mpi.h from MPIUNI"
8: #endif
9: #if !defined(PETSC_STDCALL)
10: #define PETSC_STDCALL
11: #endif
12: #include <stdio.h>
13: #if defined(PETSC_HAVE_STDLIB_H)
14: #include <stdlib.h>
15: #endif
17: #define MPI_SUCCESS 0
18: #define MPI_FAILURE 1
19: void *MPIUNI_TMP = 0;
20: int MPIUNI_DATASIZE[10] = {sizeof(int),sizeof(float),sizeof(double),2*sizeof(double),sizeof(char),2*sizeof(int),4*sizeof(double),4,8,2*sizeof(double)};
21: /*
22: With MPI Uni there is only one communicator, which is called 1.
23: */
24: #define MAX_ATTR 128
26: typedef struct {
27: void *extra_state;
28: void *attribute_val;
29: int active;
30: MPI_Delete_function *del;
31: } MPI_Attr;
33: static MPI_Attr attr[MAX_ATTR];
34: static int num_attr = 1,mpi_tag_ub = 100000000;
36: #if defined(__cplusplus)
37: extern "C" {
38: #endif
40: /*
41: To avoid problems with prototypes to the system memcpy() it is duplicated here
42: */
43: int MPIUNI_Memcpy(void *a,const void* b,int n) {
44: int i;
45: char *aa= (char*)a;
46: char *bb= (char*)b;
48: for (i=0; i<n; i++) aa[i] = bb[i];
49: return 0;
50: }
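Illustrative sketch, not part of mpi.c: on the single MPIUNI rank a "reduction" or gather is just a copy of the send buffer into the receive buffer, which is exactly how the Fortran wrappers later in this file use MPIUNI_Memcpy(). The example name below is hypothetical.

static void example_single_rank_copy(void)
{
  double send[3] = {1.0,2.0,3.0},recv[3];
  /* the "reduced" result is a plain byte copy of the send buffer */
  MPIUNI_Memcpy(recv,send,3*(int)sizeof(double));
}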
52: /*
53: Used to set the built-in MPI_TAG_UB attribute
54: */
55: static int Keyval_setup(void)
56: {
57: attr[0].active = 1;
58: attr[0].attribute_val = &mpi_tag_ub;
59: return 0;
60: }
62: int MPI_Keyval_create(MPI_Copy_function *copy_fn,MPI_Delete_function *delete_fn,int *keyval,void *extra_state)
63: {
64: if (num_attr >= MAX_ATTR) MPI_Abort(MPI_COMM_WORLD,1);
66: attr[num_attr].extra_state = extra_state;
67: attr[num_attr].del = delete_fn;
68: *keyval = num_attr++;
69: return 0;
70: }
72: int MPI_Keyval_free(int *keyval)
73: {
74: attr[*keyval].active = 0;
75: return MPI_SUCCESS;
76: }
78: int MPI_Attr_put(MPI_Comm comm,int keyval,void *attribute_val)
79: {
80: attr[keyval].active = 1;
81: attr[keyval].attribute_val = attribute_val;
82: return MPI_SUCCESS;
83: }
84:
85: int MPI_Attr_delete(MPI_Comm comm,int keyval)
86: {
87: if (attr[keyval].active && attr[keyval].del) {
88: void* save_attribute_val = attr[keyval].attribute_val;
89: attr[keyval].active = 0;
90: attr[keyval].attribute_val = 0;
91: (*(attr[keyval].del))(comm,keyval,save_attribute_val,attr[keyval].extra_state);
92: }
93: return MPI_SUCCESS;
94: }
96: int MPI_Attr_get(MPI_Comm comm,int keyval,void *attribute_val,int *flag)
97: {
98: if (!keyval) Keyval_setup();
99: *flag = attr[keyval].active;
100: *(void **)attribute_val = attr[keyval].attribute_val;
101: return MPI_SUCCESS;
102: }
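A usage sketch (not part of mpi.c) of the attribute cache above. It assumes, as Keyval_setup() and the `if (!keyval)` test suggest, that the built-in MPI_TAG_UB attribute lives at keyval 0; the example name is hypothetical.

static void example_attr_usage(void)
{
  int  *tag_ub,flag,keyval;
  void *val;
  MPI_Attr_get(MPI_COMM_WORLD,0,&tag_ub,&flag);   /* flag = 1, *tag_ub = 100000000 */
  MPI_Keyval_create(0,0,&keyval,0);               /* no copy or delete callbacks */
  MPI_Attr_put(MPI_COMM_WORLD,keyval,&flag);      /* cache any pointer-sized value */
  MPI_Attr_get(MPI_COMM_WORLD,keyval,&val,&flag); /* val now points at flag */
}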
104: int MPI_Comm_create(MPI_Comm comm,MPI_Group group,MPI_Comm *newcomm)
105: {
106: *newcomm = comm;
107: return MPI_SUCCESS;
108: }
110: static int dups = 0;
111: int MPI_Comm_dup(MPI_Comm comm,MPI_Comm *out)
112: {
113: *out = comm;
114: dups++;
115: return 0;
116: }
118: int MPI_Comm_free(MPI_Comm *comm)
119: {
120: int i;
122: if (--dups) return MPI_SUCCESS;
123: for (i=0; i<num_attr; i++) {
124: if (attr[i].active && attr[i].del) {
125: (*attr[i].del)(*comm,i,attr[i].attribute_val,attr[i].extra_state);
126: }
127: attr[i].active = 0;
128: }
129: return MPI_SUCCESS;
130: }
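Illustrative only (not part of mpi.c): MPI_Comm_dup() above just bumps a global reference count, and MPI_Comm_free() runs the attribute delete callbacks only once that count drops back to zero.

static void example_comm_refcount(void)
{
  MPI_Comm a,b;
  MPI_Comm_dup(MPI_COMM_WORLD,&a); /* dups = 1 */
  MPI_Comm_dup(MPI_COMM_WORLD,&b); /* dups = 2 */
  MPI_Comm_free(&b);               /* dups = 1, attributes untouched */
  MPI_Comm_free(&a);               /* dups = 0, delete callbacks run */
}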
132: int MPI_Comm_size(MPI_Comm comm, int*size)
133: {
134: *size=1;
135: return MPI_SUCCESS;
136: }
138: int MPI_Comm_rank(MPI_Comm comm, int*rank)
139: {
140: *rank=0;
141: return MPI_SUCCESS;
142: }
144: int MPI_Abort(MPI_Comm comm,int errorcode)
145: {
146: abort();
147: return MPI_SUCCESS;
148: }
150: /* --------------------------------------------------------------------------*/
151:
152: static int MPI_was_initialized = 0;
153: static int MPI_was_finalized = 0;
155: int MPI_Init(int *argc, char ***argv)
156: {
157: if (MPI_was_initialized) return 1;
158: if (MPI_was_finalized) return 1;
159: MPI_was_initialized = 1;
160: return 0;
161: }
163: int MPI_Finalize(void)
164: {
165: if (MPI_was_finalized) return 1;
166: if (!MPI_was_initialized) return 1;
167: MPI_was_finalized = 1;
168: return 0;
169: }
171: int MPI_Initialized(int *flag)
172: {
173: *flag = MPI_was_initialized;
174: return 0;
175: }
177: int MPI_Finalized(int *flag)
178: {
179: *flag = MPI_was_finalized;
180: return 0;
181: }
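For illustration (not part of mpi.c), the two lifecycle flags above make repeated or out-of-order calls return a nonzero error instead of doing anything.

static void example_lifecycle(void)
{
  int flag;
  MPI_Init((int*)0,(char***)0); /* returns 0 */
  MPI_Initialized(&flag);       /* flag = 1 */
  MPI_Init((int*)0,(char***)0); /* returns 1: already initialized */
  MPI_Finalize();               /* returns 0 */
  MPI_Finalize();               /* returns 1: already finalized */
  MPI_Finalized(&flag);         /* flag = 1 */
}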
183: /* ------------------- Fortran versions of several routines ------------------ */
185: #if defined(PETSC_HAVE_FORTRAN_CAPS)
186: #define mpi_init_ MPI_INIT
187: #define mpi_finalize_ MPI_FINALIZE
188: #define mpi_comm_size_ MPI_COMM_SIZE
189: #define mpi_comm_rank_ MPI_COMM_RANK
190: #define mpi_abort_ MPI_ABORT
191: #define mpi_reduce_ MPI_REDUCE
192: #define mpi_allreduce_ MPI_ALLREDUCE
193: #define mpi_barrier_ MPI_BARRIER
194: #define mpi_bcast_ MPI_BCAST
195: #define mpi_gather_ MPI_GATHER
196: #define mpi_allgather_ MPI_ALLGATHER
197: #define mpi_comm_split_ MPI_COMM_SPLIT
198: #define mpi_scan_ MPI_SCAN
199: #define mpi_send_ MPI_SEND
200: #define mpi_recv_ MPI_RECV
201: #define mpi_reduce_scatter_ MPI_REDUCE_SCATTER
202: #define mpi_irecv_ MPI_IRECV
203: #define mpi_isend_ MPI_ISEND
204: #define mpi_sendrecv_ MPI_SENDRECV
205: #define mpi_test_ MPI_TEST
206: #define mpi_waitall_ MPI_WAITALL
207: #define mpi_waitany_ MPI_WAITANY
208: #define mpi_allgatherv_ MPI_ALLGATHERV
209: #define mpi_alltoallv_ MPI_ALLTOALLV
210: #define mpi_comm_create_ MPI_COMM_CREATE
211: #define mpi_address_ MPI_ADDRESS
212: #define mpi_pack_ MPI_PACK
213: #define mpi_unpack_ MPI_UNPACK
214: #define mpi_pack_size_ MPI_PACK_SIZE
215: #define mpi_type_struct_ MPI_TYPE_STRUCT
216: #define mpi_type_commit_ MPI_TYPE_COMMIT
217: #define mpi_wtime_ MPI_WTIME
218: #define mpi_cancel_ MPI_CANCEL
219: #define mpi_comm_dup_ MPI_COMM_DUP
220: #define mpi_comm_free_ MPI_COMM_FREE
221: #define mpi_get_count_ MPI_GET_COUNT
222: #define mpi_get_processor_name_ MPI_GET_PROCESSOR_NAME
223: #define mpi_initialized_ MPI_INITIALIZED
224: #define mpi_iprobe_ MPI_IPROBE
225: #define mpi_probe_ MPI_PROBE
226: #define mpi_request_free_ MPI_REQUEST_FREE
227: #define mpi_ssend_ MPI_SSEND
228: #define mpi_wait_ MPI_WAIT
229: #define mpi_comm_group_ MPI_COMM_GROUP
230: #define mpi_exscan_ MPI_EXSCAN
231: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
232: #define mpi_init_ mpi_init
233: #define mpi_finalize_ mpi_finalize
234: #define mpi_comm_size_ mpi_comm_size
235: #define mpi_comm_rank_ mpi_comm_rank
236: #define mpi_abort_ mpi_abort
237: #define mpi_reduce_ mpi_reduce
238: #define mpi_allreduce_ mpi_allreduce
239: #define mpi_barrier_ mpi_barrier
240: #define mpi_bcast_ mpi_bcast
241: #define mpi_gather_ mpi_gather
242: #define mpi_allgather_ mpi_allgather
243: #define mpi_comm_split_ mpi_comm_split
244: #define mpi_scan_ mpi_scan
245: #define mpi_send_ mpi_send
246: #define mpi_recv_ mpi_recv
247: #define mpi_reduce_scatter_ mpi_reduce_scatter
248: #define mpi_irecv_ mpi_irecv
249: #define mpi_isend_ mpi_isend
250: #define mpi_sendrecv_ mpi_sendrecv
251: #define mpi_test_ mpi_test
252: #define mpi_waitall_ mpi_waitall
253: #define mpi_waitany_ mpi_waitany
254: #define mpi_allgatherv_ mpi_allgatherv
255: #define mpi_alltoallv_ mpi_alltoallv
256: #define mpi_comm_create_ mpi_comm_create
257: #define mpi_address_ mpi_address
258: #define mpi_pack_ mpi_pack
259: #define mpi_unpack_ mpi_unpack
260: #define mpi_pack_size_ mpi_pack_size
261: #define mpi_type_struct_ mpi_type_struct
262: #define mpi_type_commit_ mpi_type_commit
263: #define mpi_wtime_ mpi_wtime
264: #define mpi_cancel_ mpi_cancel
265: #define mpi_comm_dup_ mpi_comm_dup
266: #define mpi_comm_free_ mpi_comm_free
267: #define mpi_get_count_ mpi_get_count
268: #define mpi_get_processor_name_ mpi_get_processor_name
269: #define mpi_initialized_ mpi_initialized
270: #define mpi_iprobe_ mpi_iprobe
271: #define mpi_probe_ mpi_probe
272: #define mpi_request_free_ mpi_request_free
273: #define mpi_ssend_ mpi_ssend
274: #define mpi_wait_ mpi_wait
275: #define mpi_comm_group_ mpi_comm_group
276: #define mpi_exscan_ mpi_exscan
277: #endif
279: #if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
280: #define mpi_init_ mpi_init__
281: #define mpi_finalize_ mpi_finalize__
282: #define mpi_comm_size_ mpi_comm_size__
283: #define mpi_comm_rank_ mpi_comm_rank__
284: #define mpi_abort_ mpi_abort__
285: #define mpi_reduce_ mpi_reduce__
286: #define mpi_allreduce_ mpi_allreduce__
287: #define mpi_barrier_ mpi_barrier__
288: #define mpi_bcast_ mpi_bcast__
289: #define mpi_gather_ mpi_gather__
290: #define mpi_allgather_ mpi_allgather__
291: #define mpi_comm_split_ mpi_comm_split__
292: #define mpi_scan_ mpi_scan__
293: #define mpi_send_ mpi_send__
294: #define mpi_recv_ mpi_recv__
295: #define mpi_reduce_scatter_ mpi_reduce_scatter__
296: #define mpi_irecv_ mpi_irecv__
297: #define mpi_isend_ mpi_isend__
298: #define mpi_sendrecv_ mpi_sendrecv__
299: #define mpi_test_ mpi_test__
300: #define mpi_waitall_ mpi_waitall__
301: #define mpi_waitany_ mpi_waitany__
302: #define mpi_allgatherv_ mpi_allgatherv__
303: #define mpi_alltoallv_ mpi_alltoallv__
304: #define mpi_comm_create_ mpi_comm_create__
305: #define mpi_address_ mpi_address__
306: #define mpi_pack_ mpi_pack__
307: #define mpi_unpack_ mpi_unpack__
308: #define mpi_pack_size_ mpi_pack_size__
309: #define mpi_type_struct_ mpi_type_struct__
310: #define mpi_type_commit_ mpi_type_commit__
311: #define mpi_wtime_ mpi_wtime__
312: #define mpi_cancel_ mpi_cancel__
313: #define mpi_comm_dup_ mpi_comm_dup__
314: #define mpi_comm_free_ mpi_comm_free__
315: #define mpi_get_count_ mpi_get_count__
316: #define mpi_get_processor_name_ mpi_get_processor_name__
317: #define mpi_initialized_ mpi_initialized__
318: #define mpi_iprobe_ mpi_iprobe__
319: #define mpi_probe_ mpi_probe__
320: #define mpi_request_free_ mpi_request_free__
321: #define mpi_ssend_ mpi_ssend__
322: #define mpi_wait_ mpi_wait__
323: #define mpi_comm_group_ mpi_comm_group__
324: #define mpi_exscan_ mpi_exscan__
325: #endif
328: /* Do not build the Fortran interface if an MPI namespace collision is to be avoided */
329: #if !defined(MPIUNI_AVOID_MPI_NAMESPACE)
331: void PETSC_STDCALL mpi_init_(int *ierr)
332: {
333: *ierr = MPI_Init((int*)0, (char***)0);
334: }
336: void PETSC_STDCALL mpi_finalize_(int *ierr)
337: {
338: *ierr = MPI_Finalize();
339: }
341: void PETSC_STDCALL mpi_comm_size_(MPI_Comm *comm,int *size,int *ierr)
342: {
343: *size = 1;
344: *ierr = 0;
345: }
347: void PETSC_STDCALL mpi_comm_rank_(MPI_Comm *comm,int *rank,int *ierr)
348: {
349: *rank=0;
350: *ierr=MPI_SUCCESS;
351: }
353: void PETSC_STDCALL mpi_comm_split_(MPI_Comm *comm,int *color,int *key, MPI_Comm *newcomm, int *ierr)
354: {
355: *newcomm = *comm;
356: *ierr=MPI_SUCCESS;
357: }
359: void PETSC_STDCALL mpi_abort_(MPI_Comm *comm,int *errorcode,int *ierr)
360: {
361: abort();
362: *ierr = MPI_SUCCESS;
363: }
365: void PETSC_STDCALL mpi_reduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *root,int *comm,int *ierr)
366: {
367: MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
368: *ierr = MPI_SUCCESS;
369: }
371: void PETSC_STDCALL mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
372: {
373: MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
374: *ierr = MPI_SUCCESS;
375: }
377: void PETSC_STDCALL mpi_barrier_(MPI_Comm *comm,int *ierr)
378: {
379: *ierr = MPI_SUCCESS;
380: }
382: void PETSC_STDCALL mpi_bcast_(void *buf,int *count,int *datatype,int *root,int *comm,int *ierr)
383: {
384: *ierr = MPI_SUCCESS;
385: }
388: void PETSC_STDCALL mpi_gather_(void *sendbuf,int *scount,int *sdatatype, void* recvbuf, int* rcount, int* rdatatype, int *root,int *comm,int *ierr)
389: {
390: MPIUNI_Memcpy(recvbuf,sendbuf,(*scount)*MPIUNI_DATASIZE[*sdatatype]);
391: *ierr = MPI_SUCCESS;
392: }
394: void PETSC_STDCALL mpi_allgather_(void *sendbuf,int *scount,int *sdatatype, void* recvbuf, int* rcount, int* rdatatype,int *comm,int *ierr)
395: {
396: MPIUNI_Memcpy(recvbuf,sendbuf,(*scount)*MPIUNI_DATASIZE[*sdatatype]);
397: *ierr = MPI_SUCCESS;
398: }
400: void PETSC_STDCALL mpi_scan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
401: {
402: MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
403: *ierr = MPI_SUCCESS;
404: }
406: void PETSC_STDCALL mpi_send_(void*buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr )
407: {
408: *ierr = MPI_Abort(MPI_COMM_WORLD,0);
409: }
411: void PETSC_STDCALL mpi_recv_(void*buf,int *count,int *datatype,int *source,int *tag,int *comm,int *status,int *ierr )
412: {
413: *ierr = MPI_Abort(MPI_COMM_WORLD,0);
414: }
416: void PETSC_STDCALL mpi_reduce_scatter_(void*sendbuf,void*recvbuf,int *recvcounts,int *datatype,int *op,int *comm,int *ierr)
417: {
418: *ierr = MPI_Abort(MPI_COMM_WORLD,0);
419: }
421: void PETSC_STDCALL mpi_irecv_(void*buf,int *count, int *datatype, int *source, int *tag, int *comm, int *request, int *ierr)
422: {
423: *ierr = MPI_Abort(MPI_COMM_WORLD,0);
424: }
426: void PETSC_STDCALL mpi_isend_(void*buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *request, int *ierr)
427: {
428: *ierr = MPI_Abort(MPI_COMM_WORLD,0);
429: }
431: void PETSC_STDCALL mpi_sendrecv_(void*sendbuf,int *sendcount,int *sendtype,int *dest,int *sendtag,void*recvbuf,int *recvcount,int *recvtype,int *source,int *recvtag,int *comm,int *status,int *ierr)
432: {
433: MPIUNI_Memcpy(recvbuf,sendbuf,(*sendcount)*MPIUNI_DATASIZE[*sendtype]);
434: *ierr = MPI_SUCCESS;
435: }
437: void PETSC_STDCALL mpi_test_(int *request,int *flag,int *status,int *ierr)
438: {
439: *ierr = MPI_Abort(MPI_COMM_WORLD,0);
440: }
442: void PETSC_STDCALL mpi_waitall_(int *count,int *array_of_requests,int *array_of_statuses,int *ierr)
443: {
444: *ierr = MPI_SUCCESS;
445: }
447: void PETSC_STDCALL mpi_waitany_(int *count,int *array_of_requests,int * index, int *status,int *ierr)
448: {
449: *ierr = MPI_SUCCESS;
450: }
452: void PETSC_STDCALL mpi_allgatherv_(void*sendbuf,int *sendcount,int *sendtype,void*recvbuf,int *recvcounts,int *displs,int *recvtype,int *comm,int *ierr)
453: {
454: MPIUNI_Memcpy(recvbuf,sendbuf,(*sendcount)*MPIUNI_DATASIZE[*sendtype]);
455: *ierr = MPI_SUCCESS;
456: }
458: void PETSC_STDCALL mpi_alltoallv_(void*sendbuf,int *sendcounts,int *sdispls,int *sendtype,void*recvbuf,int *recvcounts,int *rdispls,int *recvtype,int *comm,int *ierr)
459: {
460: MPIUNI_Memcpy(recvbuf,sendbuf,(*sendcounts)*MPIUNI_DATASIZE[*sendtype]);
461: *ierr = MPI_SUCCESS;
462: }
464: void PETSC_STDCALL mpi_comm_create_(int *comm,int *group,int *newcomm,int *ierr)
465: {
466: *newcomm = *comm;
467: *ierr = MPI_SUCCESS;
468: }
470: void PETSC_STDCALL mpi_address_(void*location,MPIUNI_INTPTR *address,int *ierr)
471: {
472: *address = (MPIUNI_INTPTR) location;
473: *ierr = MPI_SUCCESS;
474: }
476: void PETSC_STDCALL mpi_pack_(void*inbuf,int *incount,int *datatype,void*outbuf,int *outsize,int *position,int *comm,int *ierr)
477: {
478: *ierr = MPI_Abort(MPI_COMM_WORLD,0);
479: }
481: void PETSC_STDCALL mpi_unpack_(void*inbuf,int *insize,int *position,void*outbuf,int *outcount,int *datatype,int *comm,int *ierr)
482: {
483: *ierr = MPI_Abort(MPI_COMM_WORLD,0);
484: }
486: void PETSC_STDCALL mpi_pack_size_(int *incount,int *datatype,int *comm,int *size,int *ierr)
487: {
488: *ierr = MPI_Abort(MPI_COMM_WORLD,0);
489: }
491: void PETSC_STDCALL mpi_type_struct_(int *count,int *array_of_blocklengths,int *array_of_displacements,int *array_of_types,int *newtype,int *ierr)
492: {
493: *ierr = MPI_Abort(MPI_COMM_WORLD,0);
494: }
496: void PETSC_STDCALL mpi_type_commit_(int *datatype,int *ierr)
497: {
498: *ierr = MPI_SUCCESS;
499: }
501: double PETSC_STDCALL mpi_wtime_(void)
502: {
503: return 0.0;
504: }
506: void PETSC_STDCALL mpi_cancel_(int *request,int *ierr)
507: {
508: *ierr = MPI_SUCCESS;
509: }
511: void PETSC_STDCALL mpi_comm_dup_(int *comm,int *out,int *ierr)
512: {
513: *out = *comm;
514: *ierr = MPI_SUCCESS;
515: }
517: void PETSC_STDCALL mpi_comm_free_(int *comm,int *ierr)
518: {
519: *ierr = MPI_SUCCESS;
520: }
522: void PETSC_STDCALL mpi_get_count_(int *status,int *datatype,int *count,int *ierr)
523: {
524: *ierr = MPI_Abort(MPI_COMM_WORLD,0);
525: }
527: /* duplicate from fortranimpl.h */
528: #if defined(PETSC_HAVE_FORTRAN_MIXED_STR_ARG)
529: #define PETSC_MIXED_LEN(len) ,int len
530: #define PETSC_END_LEN(len)
531: #else
532: #define PETSC_MIXED_LEN(len)
533: #define PETSC_END_LEN(len) ,int len
534: #endif
536: void PETSC_STDCALL mpi_get_processor_name_(char *name PETSC_MIXED_LEN(len),int *result_len,int *ierr PETSC_END_LEN(len))
537: {
538: MPIUNI_Memcpy(name,"localhost",9*sizeof(char));
539: *result_len = 9;
540: *ierr = MPI_SUCCESS;
541: }
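For reference (not part of mpi.c): expanding PETSC_MIXED_LEN/PETSC_END_LEN on the prototype above yields one of the following, i.e. the hidden Fortran string length either follows the character argument or trails all other arguments.

  /* with PETSC_HAVE_FORTRAN_MIXED_STR_ARG */
  void PETSC_STDCALL mpi_get_processor_name_(char *name,int len,int *result_len,int *ierr)
  /* without PETSC_HAVE_FORTRAN_MIXED_STR_ARG */
  void PETSC_STDCALL mpi_get_processor_name_(char *name,int *result_len,int *ierr,int len)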
543: void PETSC_STDCALL mpi_initialized_(int *flag,int *ierr)
544: {
545: *flag = MPI_was_initialized;
546: *ierr = MPI_SUCCESS;
547: }
549: void PETSC_STDCALL mpi_iprobe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
550: {
551: *ierr = MPI_SUCCESS;
552: }
554: void PETSC_STDCALL mpi_probe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
555: {
556: *ierr = MPI_SUCCESS;
557: }
559: void PETSC_STDCALL mpi_request_free_(int *request,int *ierr)
560: {
561: *ierr = MPI_SUCCESS;
562: }
564: void PETSC_STDCALL mpi_ssend_(void*buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
565: {
566: *ierr = MPI_Abort(MPI_COMM_WORLD,0);
567: }
569: void PETSC_STDCALL mpi_wait_(int *request,int *status,int *ierr)
570: {
571: *ierr = MPI_SUCCESS;
572: }
574: void PETSC_STDCALL mpi_comm_group_(int*comm,int*group,int *ierr)
575: {
576: *ierr = MPI_SUCCESS;
577: }
579: void PETSC_STDCALL mpi_exscan_(void*sendbuf,void*recvbuf,int*count,int*datatype,int*op,int*comm,int*ierr)
580: {
581: *ierr = MPI_SUCCESS;
582: }
584: #endif /* MPIUNI_AVOID_MPI_NAMESPACE */
586: #if defined(__cplusplus)
587: }
588: #endif