Actual source code: mpi.h

petsc-3.8.3 2017-12-09
  1: /*
  2:    This is a special set of bindings for uni-processor use of MPI by the PETSc library.

  4:    NOT ALL THE MPI CALLS ARE IMPLEMENTED CORRECTLY! Only those needed by PETSc are.

  6:    For example,
  7:    * Does not implement send to self.
  8:    * Does not implement attributes correctly.
  9: */

 11: /*
 12:   The following info is a response to one of the petsc-maint questions
 13:   regarding MPIUNI.

 15:   MPIUNI was developed with the aim of getting PETSc compiled and
 16:   usable in the absence of a full MPI implementation. With this, we
 17:   were able to provide PETSc on Windows and Windows64 even before any
 18:   MPI implementation was available on these platforms [or with certain
 19:   compilers, like Borland, that do not have a usable MPI
 20:   implementation].

 22:   However, providing a sequential, standards-compliant MPI
 23:   implementation is *not* the goal of MPIUNI. The development strategy
 24:   was to make just enough changes so that the PETSc sources and examples
 25:   compile without errors and run in uni-processor mode. This is
 26:   the reason each function is not documented.

 28:   PETSc usage of MPIUNI is primarily from C. However, a minimal Fortran
 29:   interface is also provided - to get the PETSc Fortran examples with a
 30:   few MPI calls working.

 32:   One of the optimizations in MPIUNI is to avoid function-call
 33:   overhead when possible. Hence most of the C functions are
 34:   implemented as macros. However, the function calls cannot be avoided
 35:   with Fortran usage.

 37:   Most PETSc objects have both sequential and parallel
 38:   implementations, which are separate. For example, we have two types of
 39:   sparse matrix storage formats: SeqAIJ and MPIAIJ. Some MPI
 40:   routines are used in the Seq part, but most of them are used in the
 41:   MPI part. The send/receive calls can be found mostly in the MPI
 42:   part.

 44:   When MPIUNI is used, only the Seq versions of the PETSc objects are
 45:   used, even though the MPI variants of the objects are compiled. Since
 46:   there are no send/receive calls in the Seq variants, PETSc works fine
 47:   with MPIUNI in sequential mode.

 49:   The reason some send/receive functions are defined to abort() is to
 50:   detect sections of code that use send/receive functions and get
 51:   executed in sequential mode (which shouldn't happen in the case of
 52:   PETSc).

 54:   A proper implementation of send/receive would involve writing a
 55:   function for each of them. Inside each of these functions, we would
 56:   have to check if the send is to self or the receive is from self, and
 57:   then do the buffering accordingly (until the receive is called) - or,
 58:   if a nonblocking receive is called, do a copy, etc. Handling
 59:   the buffering aspects might be complicated enough that, in this
 60:   case, a proper implementation of MPI might as well be used. This is
 61:   the reason send to self is not implemented in MPIUNI, and never
 62:   will be.

 64:   Proper implementations of MPI [e.g. MPICH & OpenMPI] are
 65:   available for most machines. When these packages are available, it is
 66:   generally preferable to use one of them instead of MPIUNI - even if
 67:   the user is using PETSc sequentially.

 69:     - MPIUNI does not support all MPI functions [or functionality].
 70:     Hence it might not work with external packages or user code that
 71:     contains other MPI calls.

 73:     - MPIUNI is not a standards-compliant implementation for np=1.
 74:     For example, if the user code has send/recv to self, then it will
 75:     abort. [Similar issues exist with a number of other MPI features.]
 76:     However, MPICH & OpenMPI are correct implementations of the MPI
 77:     standard for np=1.

 79:     - When user code uses multiple MPI-based packages that have their
 80:     own *internal* stubs equivalent to MPIUNI, these multiple
 81:     implementations of MPI for np=1 invariably conflict with each other
 82:     in sequential mode. The correct thing to do is make all such
 83:     packages use the *same* MPI implementation for np=1. MPICH/OpenMPI
 84:     satisfy this requirement [and hence are the correct choice].

 86:     - Using MPICH/OpenMPI sequentially should have minimal
 87:     disadvantages. [For example, these binaries can be run without
 88:     mpirun/mpiexec as ./executable, without requiring any extra
 89:     configuration for ssh/rsh/daemons etc.] This should not be a
 90:     reason to avoid these packages for sequential use.

 92:     Instructions for building standalone MPIUNI [e.g. linux/gcc+gfortran] (a usage sketch follows the steps):
 93:     - extract include/mpiuni/mpi.h, mpif.h, and src/sys/mpiuni/mpi.c from PETSc
 94:     - remove the reference to petscconf.h from mpi.h
 95:     - gcc -c mpi.c -DPETSC_HAVE_STDLIB_H -DPETSC_HAVE_FORTRAN_UNDERSCORE
 96:     - ar cr libmpiuni.a mpi.o
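
    The resulting library can then be linked into a sequential application.
    A minimal sketch (the file and path names below are only placeholders,
    not part of PETSc):
    - gcc myprog.c -I/path/to/mpiuni -L. -lmpiuni -o myprog
    where myprog.c includes this mpi.h and uses only the subset of MPI that
    MPIUNI implements.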

 98: */


 99: #if !defined(__MPIUNI_H)
100: #define __MPIUNI_H

103: /* Required by abort() in mpi.c & for win64 */
104: #include <petscconf.h>
105: #include <stddef.h>

107: /*  This is reproduced from petscsys.h so that mpi.h can be used standalone without first including petscsys.h */
108: #if defined(_WIN32) && defined(PETSC_USE_SHARED_LIBRARIES)
109: #  define MPIUni_ __declspec(dllexport)
110: #  define MPIUni_PETSC_DLLIMPORT __declspec(dllimport)
111: #elif defined(PETSC_USE_VISIBILITY_CXX) && defined(__cplusplus)
112: #  define MPIUni_ __attribute__((visibility ("default")))
113: #  define MPIUni_PETSC_DLLIMPORT __attribute__((visibility ("default")))
114: #elif defined(PETSC_USE_VISIBILITY_C) && !defined(__cplusplus)
115: #  define MPIUni_ __attribute__((visibility ("default")))
116: #  define MPIUni_PETSC_DLLIMPORT __attribute__((visibility ("default")))
117: #else
118: #  define MPIUni_
119: #  define MPIUni_PETSC_DLLIMPORT
120: #endif

122: #if defined(petsc_EXPORTS)
123: #  define MPIUni_PETSC_VISIBILITY_PUBLIC MPIUni_
124: #else  /* Win32 users need this to import symbols from petsc.dll */
125: #  define MPIUni_PETSC_VISIBILITY_PUBLIC MPIUni_PETSC_DLLIMPORT
126: #endif

128: #if defined(__cplusplus)
129: #define MPIUni_PETSC_EXTERN extern "C" MPIUni_PETSC_VISIBILITY_PUBLIC
130: #else
131: #define MPIUni_PETSC_EXTERN extern MPIUni_PETSC_VISIBILITY_PUBLIC
132: #endif

134: #if defined(__cplusplus)
135: extern "C" {
136: #endif

138: /* MPI_Aint has to be a signed integral type large enough to hold a pointer */
139: #if PETSC_SIZEOF_INT == PETSC_SIZEOF_VOID_P
140: typedef int MPI_Aint;
141: #elif PETSC_SIZEOF_LONG == PETSC_SIZEOF_VOID_P
142: typedef long MPI_Aint;
143: #else
144: typedef ptrdiff_t MPI_Aint;
145: #endif

147: /* old 32bit MS compiler does not support long long */
148: #if defined(PETSC_SIZEOF_LONG_LONG)
149: typedef long long MPIUNI_INT64;
150: typedef unsigned long long MPIUNI_UINT64;
151: #elif defined(PETSC_HAVE___INT64)
152: typedef __int64 MPIUNI_INT64;
153: typedef unsigned __int64 MPIUNI_UINT64;
154: #else
155: #error "cannot determine MPIUNI_INT64, MPIUNI_UINT64 types"
156: #endif

158: /*

160:  MPIUNI_ARG is used in the macros below only to stop various C/C++ compilers
161:  from generating warning messages about unused variables while compiling PETSc.
162: */
163: MPIUni_PETSC_EXTERN void *MPIUNI_TMP;
164: #define MPIUNI_ARG(arg) (MPIUNI_TMP = (void *)(MPI_Aint) (arg))
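
/*
   As an illustration of how this works (using the MPI_Barrier() macro defined
   further below), a call such as

     ierr = MPI_Barrier(comm);

   expands to roughly

     ierr = ((MPIUNI_TMP = (void *)(MPI_Aint)(comm)), MPI_SUCCESS);

   so the argument is "used", no unused-variable warning is emitted, and the
   call reduces to a constant with no function-call overhead.
*/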

166: #define MPI_IDENT            0
167: #define MPI_CONGRUENT        1
168: #define MPI_SIMILAR          2
169: #define MPI_UNEQUAL          3

171: #define MPI_BOTTOM   ((void *) 0)
172: #define MPI_IN_PLACE ((void *)-1)

174: #define MPI_ANY_SOURCE     (-2)
175: #define MPI_ANY_TAG        (-1)
176: #define MPI_UNDEFINED  (-32766)

178: #define MPI_SUCCESS          0
179: #define MPI_ERR_OTHER       17
180: #define MPI_ERR_UNKNOWN     18
181: #define MPI_ERR_INTERN      21

183: #define MPI_KEYVAL_INVALID   0
184: #define MPI_TAG_UB           0

186: #define MPI_MAX_PROCESSOR_NAME 1024
187: #define MPI_MAX_ERROR_STRING   2056

189: typedef int MPI_Comm;
190: #define MPI_COMM_NULL  0
191: #define MPI_COMM_SELF  1
192: #define MPI_COMM_WORLD 2

194: typedef int MPI_Info;
195: #define MPI_INFO_NULL 0

197: typedef struct {int MPI_SOURCE,MPI_TAG,MPI_ERROR;} MPI_Status;
198: #define MPI_STATUS_IGNORE   (MPI_Status *)0
199: #define MPI_STATUSES_IGNORE (MPI_Status *)0

201: /* 32-bit packing scheme: [combiner:4 | type-index:8 | count:12 | base-bytes:8] */
202: /* Any changes here must also be reflected in mpif.h */
203: typedef int MPI_Datatype;
204: #define MPI_DATATYPE_NULL      0
205: #define MPI_PACKED             0

207: #define MPI_FLOAT              (1 << 20 | 1 << 8 | (int)sizeof(float))
208: #define MPI_DOUBLE             (1 << 20 | 1 << 8 | (int)sizeof(double))
209: #define MPI_LONG_DOUBLE        (1 << 20 | 1 << 8 | (int)sizeof(long double))

211: #define MPI_COMPLEX            (2 << 20 | 1 << 8 | 2*(int)sizeof(float))
212: #define MPI_C_COMPLEX          (2 << 20 | 1 << 8 | 2*(int)sizeof(float))
213: #define MPI_C_FLOAT_COMPLEX    (2 << 20 | 1 << 8 | 2*(int)sizeof(float))
214: #define MPI_DOUBLE_COMPLEX     (2 << 20 | 1 << 8 | 2*(int)sizeof(double))
215: #define MPI_C_DOUBLE_COMPLEX   (2 << 20 | 1 << 8 | 2*(int)sizeof(double))

217: #define MPI_CHAR               (3 << 20 | 1 << 8 | (int)sizeof(char))
218: #define MPI_BYTE               (3 << 20 | 1 << 8 | (int)sizeof(char))
219: #define MPI_SIGNED_CHAR        (3 << 20 | 1 << 8 | (int)sizeof(signed char))
220: #define MPI_UNSIGNED_CHAR      (3 << 20 | 1 << 8 | (int)sizeof(unsigned char))

222: #define MPI_SHORT              (4 << 20 | 1 << 8 | (int)sizeof(short))
223: #define MPI_INT                (4 << 20 | 1 << 8 | (int)sizeof(int))
224: #define MPI_LONG               (4 << 20 | 1 << 8 | (int)sizeof(long))
225: #define MPI_LONG_LONG          (4 << 20 | 1 << 8 | (int)sizeof(MPIUNI_INT64))
226: #define MPI_LONG_LONG_INT      MPI_LONG_LONG

228: #define MPI_UNSIGNED_SHORT     (5 << 20 | 1 << 8 | (int)sizeof(unsigned short))
229: #define MPI_UNSIGNED           (5 << 20 | 1 << 8 | (int)sizeof(unsigned))
230: #define MPI_UNSIGNED_LONG      (5 << 20 | 1 << 8 | (int)sizeof(unsigned long))
231: #define MPI_UNSIGNED_LONG_LONG (5 << 20 | 1 << 8 | (int)sizeof(MPIUNI_UINT64))

233: #define MPI_FLOAT_INT          (10 << 20 | 1 << 8 | (int)(sizeof(float) + sizeof(int)))
234: #define MPI_DOUBLE_INT         (11 << 20 | 1 << 8 | (int)(sizeof(double) + sizeof(int)))
235: #define MPI_LONG_INT           (12 << 20 | 1 << 8 | (int)(sizeof(long) + sizeof(int)))
236: #define MPI_SHORT_INT          (13 << 20 | 1 << 8 | (int)(sizeof(short) + sizeof(int)))
237: #define MPI_2INT               (14 << 20 | 1 << 8 | (int)(2*sizeof(int)))

239: #define MPI_ORDER_C            0
240: #define MPI_ORDER_FORTRAN      1

242: #define MPI_sizeof_default(datatype) ((((datatype) >> 8) & 0xfff) * ((datatype) & 0xff))
243: #if defined(PETSC_USE_REAL___FP16)
244: extern MPI_Datatype MPIU___FP16;
245: #define MPI_sizeof(datatype) ((datatype == MPIU___FP16) ? (int)(2*sizeof(char)) : MPI_sizeof_default(datatype))
246: #elif defined(PETSC_USE_REAL___FLOAT128)
247: extern MPI_Datatype MPIU___FLOAT128;
248: #define MPI_sizeof(datatype) ((datatype == MPIU___FLOAT128) ? (int)(2*sizeof(double)) : MPI_sizeof_default(datatype))
249: #else
250: #define MPI_sizeof(datatype) (MPI_sizeof_default(datatype))
251: #endif
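
/*
   A worked example of the packing scheme above, assuming sizeof(double) == 8:
   MPI_DOUBLE == (1<<20 | 1<<8 | 8), i.e. combiner 0 (named), type index 1,
   count 1, and a base size of 8 bytes, so MPI_sizeof(MPI_DOUBLE) evaluates to
   1*8 = 8.
*/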

253: MPIUni_PETSC_EXTERN int MPIUNI_Memcpy(void*,const void*,int);

255: typedef int MPI_Request;
256: #define MPI_REQUEST_NULL 0

258: typedef int MPI_Group;
259: #define MPI_GROUP_NULL  0
260: #define MPI_GROUP_EMPTY 0

262: typedef int MPI_Op;
263: #define MPI_OP_NULL    0
264: #define MPI_SUM        1
265: #define MPI_MAX        2
266: #define MPI_MIN        3
267: #define MPI_REPLACE    4
268: #define MPI_PROD       5
269: #define MPI_LAND       6
270: #define MPI_BAND       7
271: #define MPI_LOR        8
272: #define MPI_BOR        9
273: #define MPI_LXOR       10
274: #define MPI_BXOR       11
275: #define MPI_MAXLOC     12
276: #define MPI_MINLOC     13

278: typedef void (MPI_User_function)(void*, void *, int *, MPI_Datatype *);

280: typedef int MPI_Errhandler;
281: #define MPI_ERRHANDLER_NULL  0
282: #define MPI_ERRORS_RETURN    0
283: #define MPI_ERRORS_ARE_FATAL 0
284: typedef void (MPI_Handler_function)(MPI_Comm *, int *, ...);

286: /*
287:   Prototypes of some functions which are implemented in mpi.c
288: */
289: typedef int (MPI_Copy_function)(MPI_Comm,int,void *,void *,void *,int *);
290: typedef int (MPI_Delete_function)(MPI_Comm,int,void *,void *);
291: #define MPI_NULL_COPY_FN   (MPI_Copy_function*)0
292: #define MPI_NULL_DELETE_FN (MPI_Delete_function*)0

294: /*
295:   To enable linking PETSc+MPIUNI with any other package that might have its
296:   own MPIUNI-equivalent implementation, we need to avoid using the 'MPI'
297:   namespace for the MPIUNI functions that go into the petsc library.

299:   For the C functions below (which get compiled into the petsc library) we
300:   map the 'MPI' functions into the 'Petsc_MPI' namespace.

302:   With Fortran we use a similar mapping - thus requiring the use of the
303:   C preprocessor with mpif.h.
304: */
305: #define MPI_Abort         Petsc_MPI_Abort
306: #define MPIUni_Abort      Petsc_MPIUni_Abort
307: #define MPI_Attr_get      Petsc_MPI_Attr_get
308: #define MPI_Keyval_free   Petsc_MPI_Keyval_free
309: #define MPI_Attr_put      Petsc_MPI_Attr_put
310: #define MPI_Attr_delete   Petsc_MPI_Attr_delete
311: #define MPI_Keyval_create Petsc_MPI_Keyval_create
312: #define MPI_Comm_free     Petsc_MPI_Comm_free
313: #define MPI_Comm_dup      Petsc_MPI_Comm_dup
314: #define MPI_Comm_create   Petsc_MPI_Comm_create
315: #define MPI_Init          Petsc_MPI_Init
316: #define MPI_Finalize      Petsc_MPI_Finalize
317: #define MPI_Initialized   Petsc_MPI_Initialized
318: #define MPI_Finalized     Petsc_MPI_Finalized
319: #define MPI_Comm_size     Petsc_MPI_Comm_size
320: #define MPI_Comm_rank     Petsc_MPI_Comm_rank
321: #define MPI_Wtime         Petsc_MPI_Wtime
322: #define MPI_Type_get_envelope Petsc_MPI_Type_get_envelope
323: #define MPI_Type_get_contents Petsc_MPI_Type_get_contents

325: /* identical C bindings */
326: #define MPI_Comm_copy_attr_function   MPI_Copy_function
327: #define MPI_Comm_delete_attr_function MPI_Delete_function
328: #define MPI_COMM_NULL_COPY_FN         MPI_NULL_COPY_FN
329: #define MPI_COMM_NULL_DELETE_FN       MPI_NULL_DELETE_FN
330: #define MPI_Comm_create_keyval        Petsc_MPI_Keyval_create
331: #define MPI_Comm_free_keyval          Petsc_MPI_Keyval_free
332: #define MPI_Comm_get_attr             Petsc_MPI_Attr_get
333: #define MPI_Comm_set_attr             Petsc_MPI_Attr_put

335: MPIUni_PETSC_EXTERN int    MPIUni_Abort(MPI_Comm,int);
336: MPIUni_PETSC_EXTERN int    MPI_Abort(MPI_Comm,int);
337: MPIUni_PETSC_EXTERN int    MPI_Attr_get(MPI_Comm comm,int keyval,void *attribute_val,int *flag);
338: MPIUni_PETSC_EXTERN int    MPI_Keyval_free(int*);
339: MPIUni_PETSC_EXTERN int    MPI_Attr_put(MPI_Comm,int,void *);
340: MPIUni_PETSC_EXTERN int    MPI_Attr_delete(MPI_Comm,int);
341: MPIUni_PETSC_EXTERN int    MPI_Keyval_create(MPI_Copy_function *,MPI_Delete_function *,int *,void *);
342: MPIUni_PETSC_EXTERN int    MPI_Comm_free(MPI_Comm*);
343: MPIUni_PETSC_EXTERN int    MPI_Comm_dup(MPI_Comm,MPI_Comm *);
344: MPIUni_PETSC_EXTERN int    MPI_Comm_create(MPI_Comm,MPI_Group,MPI_Comm *);
345: MPIUni_PETSC_EXTERN int    MPI_Init(int *, char ***);
346: MPIUni_PETSC_EXTERN int    MPI_Finalize(void);
347: MPIUni_PETSC_EXTERN int    MPI_Initialized(int*);
348: MPIUni_PETSC_EXTERN int    MPI_Finalized(int*);
349: MPIUni_PETSC_EXTERN int    MPI_Comm_size(MPI_Comm,int*);
350: MPIUni_PETSC_EXTERN int    MPI_Comm_rank(MPI_Comm,int*);
351: MPIUni_PETSC_EXTERN double MPI_Wtime(void);

353: MPIUni_PETSC_EXTERN int MPI_Type_get_envelope(MPI_Datatype,int*,int*,int*,int*);
354: MPIUni_PETSC_EXTERN int MPI_Type_get_contents(MPI_Datatype,int,int,int,int*,MPI_Aint*,MPI_Datatype*);
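
/*
   A sketch of the attribute-caching usage these routines support (the variable
   names below are only illustrative, and recall from the comment at the top of
   this file that attributes are not implemented fully correctly):

     static int value = 17;
     int        keyval, flag;
     void       *attr;

     MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&keyval,(void*)0);
     MPI_Attr_put(MPI_COMM_WORLD,keyval,(void*)&value);
     MPI_Attr_get(MPI_COMM_WORLD,keyval,&attr,&flag);
     MPI_Keyval_free(&keyval);
*/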

356: /*
357:     Routines we have replaced with macros that do nothing.
358:     Some return error codes, others return success.
359: */

361: typedef int MPI_Fint;
362: #define MPI_Comm_f2c(comm) (MPI_Comm)(comm)
363: #define MPI_Comm_c2f(comm) (MPI_Fint)(comm)
364: #define MPI_Type_f2c(type) (MPI_Datatype)(type)
365: #define MPI_Type_c2f(type) (MPI_Fint)(type)
366: #define MPI_Op_f2c(op)     (MPI_Op)(op)
367: #define MPI_Op_c2f(op)     (MPI_Fint)(op)

369: #define MPI_Send(buf,count,datatype,dest,tag,comm)  \
370:      (MPIUNI_ARG(buf),\
371:       MPIUNI_ARG(count),\
372:       MPIUNI_ARG(datatype),\
373:       MPIUNI_ARG(dest),\
374:       MPIUNI_ARG(tag),\
375:       MPIUNI_ARG(comm),\
376:       MPIUni_Abort(MPI_COMM_WORLD,0))
377: #define MPI_Recv(buf,count,datatype,source,tag,comm,status) \
378:      (MPIUNI_ARG(buf),\
379:       MPIUNI_ARG(count),\
380:       MPIUNI_ARG(datatype),\
381:       MPIUNI_ARG(source),\
382:       MPIUNI_ARG(tag),\
383:       MPIUNI_ARG(comm),\
384:       MPIUNI_ARG(status),\
385:       MPIUni_Abort(MPI_COMM_WORLD,0))
386: #define MPI_Get_count(status,datatype,count) \
387:      (MPIUNI_ARG(status),\
388:       MPIUNI_ARG(datatype),\
389:       MPIUNI_ARG(count),\
390:       MPIUni_Abort(MPI_COMM_WORLD,0))
391: #define MPI_Bsend(buf,count,datatype,dest,tag,comm)  \
392:      (MPIUNI_ARG(buf),\
393:       MPIUNI_ARG(count),\
394:       MPIUNI_ARG(datatype),\
395:       MPIUNI_ARG(dest),\
396:       MPIUNI_ARG(tag),\
397:       MPIUNI_ARG(comm),\
398:       MPIUni_Abort(MPI_COMM_WORLD,0))
399: #define MPI_Ssend(buf,count,datatype,dest,tag,comm) \
400:      (MPIUNI_ARG(buf),\
401:       MPIUNI_ARG(count),\
402:       MPIUNI_ARG(datatype),\
403:       MPIUNI_ARG(dest),\
404:       MPIUNI_ARG(tag),\
405:       MPIUNI_ARG(comm),\
406:       MPIUni_Abort(MPI_COMM_WORLD,0))
407: #define MPI_Rsend(buf,count,datatype,dest,tag,comm) \
408:      (MPIUNI_ARG(buf),\
409:       MPIUNI_ARG(count),\
410:       MPIUNI_ARG(datatype),\
411:       MPIUNI_ARG(dest),\
412:       MPIUNI_ARG(tag),\
413:       MPIUNI_ARG(comm),\
414:       MPIUni_Abort(MPI_COMM_WORLD,0))
415: #define MPI_Buffer_attach(buffer,size) \
416:      (MPIUNI_ARG(buffer),\
417:       MPIUNI_ARG(size),\
418:       MPI_SUCCESS)
419: #define MPI_Buffer_detach(buffer,size)\
420:      (MPIUNI_ARG(buffer),\
421:       MPIUNI_ARG(size),\
422:       MPI_SUCCESS)
423: #define MPI_Ibsend(buf,count,datatype,dest,tag,comm,request) \
424:      (MPIUNI_ARG(buf),\
425:       MPIUNI_ARG(count),\
426:       MPIUNI_ARG(datatype),\
427:       MPIUNI_ARG(dest),\
428:       MPIUNI_ARG(tag),\
429:       MPIUNI_ARG(comm),\
430:       MPIUNI_ARG(request),\
431:       MPIUni_Abort(MPI_COMM_WORLD,0))
432: #define MPI_Issend(buf,count,datatype,dest,tag,comm,request) \
433:      (MPIUNI_ARG(buf),\
434:       MPIUNI_ARG(count),\
435:       MPIUNI_ARG(datatype),\
436:       MPIUNI_ARG(dest),\
437:       MPIUNI_ARG(tag),\
438:       MPIUNI_ARG(comm),\
439:       MPIUNI_ARG(request),\
440:       MPIUni_Abort(MPI_COMM_WORLD,0))
441: #define MPI_Irsend(buf,count,datatype,dest,tag,comm,request) \
442:      (MPIUNI_ARG(buf),\
443:       MPIUNI_ARG(count),\
444:       MPIUNI_ARG(datatype),\
445:       MPIUNI_ARG(dest),\
446:       MPIUNI_ARG(tag),\
447:       MPIUNI_ARG(comm),\
448:       MPIUNI_ARG(request),\
449:       MPIUni_Abort(MPI_COMM_WORLD,0))
450: #define MPI_Irecv(buf,count,datatype,source,tag,comm,request) \
451:      (MPIUNI_ARG(buf),\
452:       MPIUNI_ARG(count),\
453:       MPIUNI_ARG(datatype),\
454:       MPIUNI_ARG(source),\
455:       MPIUNI_ARG(tag),\
456:       MPIUNI_ARG(comm),\
457:       MPIUNI_ARG(request),\
458:       MPIUni_Abort(MPI_COMM_WORLD,0))
459: #define MPI_Isend(buf,count,datatype,dest,tag,comm,request) \
460:      (MPIUNI_ARG(buf),\
461:       MPIUNI_ARG(count),\
462:       MPIUNI_ARG(datatype),\
463:       MPIUNI_ARG(dest),\
464:       MPIUNI_ARG(tag),\
465:       MPIUNI_ARG(comm),\
466:       MPIUNI_ARG(request),\
467:       MPIUni_Abort(MPI_COMM_WORLD,0))
468: #define MPI_Wait(request,status) \
469:      (MPIUNI_ARG(request),\
470:       MPIUNI_ARG(status),\
471:       MPI_SUCCESS)
472: #define MPI_Test(request,flag,status) \
473:      (MPIUNI_ARG(request),\
474:       MPIUNI_ARG(status),\
475:       *(flag) = 0,\
476:       MPI_SUCCESS)
477: #define MPI_Request_free(request) \
478:      (MPIUNI_ARG(request),\
479:       MPI_SUCCESS)
480: #define MPI_Waitany(count,array_of_requests,index,status) \
481:      (MPIUNI_ARG(count),\
482:       MPIUNI_ARG(array_of_requests),\
483:       MPIUNI_ARG(status),\
484:       *(index) = 0,\
485:       MPI_SUCCESS)
486: #define MPI_Testany(a,b,c,d,e) \
487:      (MPIUNI_ARG(a),\
488:       MPIUNI_ARG(b),\
489:       MPIUNI_ARG(c),\
490:       MPIUNI_ARG(d),\
491:       MPIUNI_ARG(e),\
492:       MPI_SUCCESS)
493: #define MPI_Waitall(count,array_of_requests,array_of_statuses) \
494:      (MPIUNI_ARG(count),\
495:       MPIUNI_ARG(array_of_requests),\
496:       MPIUNI_ARG(array_of_statuses),\
497:       MPI_SUCCESS)
498: #define MPI_Testall(count,array_of_requests,flag,array_of_statuses) \
499:      (MPIUNI_ARG(count),\
500:       MPIUNI_ARG(array_of_requests),\
501:       MPIUNI_ARG(flag),\
502:       MPIUNI_ARG(array_of_statuses),\
503:       MPI_SUCCESS)
504: #define MPI_Waitsome(incount,array_of_requests,outcount,\
505:                      array_of_indices,array_of_statuses)        \
506:      (MPIUNI_ARG(incount),\
507:       MPIUNI_ARG(array_of_requests),\
508:       MPIUNI_ARG(outcount),\
509:       MPIUNI_ARG(array_of_indices),\
510:       MPIUNI_ARG(array_of_statuses),\
511:       MPI_SUCCESS)
512: #define MPI_Comm_group(comm,group) \
513:      (MPIUNI_ARG(comm),\
514:       MPIUNI_ARG(group),\
515:       MPI_SUCCESS)
516: #define MPI_Group_incl(group,n,ranks,newgroup) \
517:      (MPIUNI_ARG(group),\
518:       MPIUNI_ARG(n),\
519:       MPIUNI_ARG(ranks),\
520:       MPIUNI_ARG(newgroup),\
521:       MPI_SUCCESS)
522: #define MPI_Testsome(incount,array_of_requests,outcount,\
523:                      array_of_indices,array_of_statuses) \
524:      (MPIUNI_ARG(incount),\
525:       MPIUNI_ARG(array_of_requests),\
526:       MPIUNI_ARG(outcount),\
527:       MPIUNI_ARG(array_of_indices),\
528:       MPIUNI_ARG(array_of_statuses),\
529:       MPI_SUCCESS)
530: #define MPI_Iprobe(source,tag,comm,flag,status) \
531:      (MPIUNI_ARG(source),\
532:       MPIUNI_ARG(tag),\
533:       MPIUNI_ARG(comm),\
534:       *(flag)=0,\
535:       MPIUNI_ARG(status),\
536:       MPI_SUCCESS)
537: #define MPI_Probe(source,tag,comm,status) \
538:      (MPIUNI_ARG(source),\
539:       MPIUNI_ARG(tag),\
540:       MPIUNI_ARG(comm),\
541:       MPIUNI_ARG(status),\
542:       MPI_SUCCESS)
543: #define MPI_Cancel(request) \
544:      (MPIUNI_ARG(request),\
545:       MPI_SUCCESS)
546: #define MPI_Test_cancelled(status,flag) \
547:      (MPIUNI_ARG(status),\
548:       *(flag)=0,\
549:       MPI_SUCCESS)
550: #define MPI_Send_init(buf,count,datatype,dest,tag,comm,request) \
551:      (MPIUNI_ARG(buf),\
552:       MPIUNI_ARG(count),\
553:       MPIUNI_ARG(datatype),\
554:       MPIUNI_ARG(dest),\
555:       MPIUNI_ARG(tag),\
556:       MPIUNI_ARG(comm),\
557:       MPIUNI_ARG(request),\
558:       MPI_SUCCESS)
559: #define MPI_Bsend_init(buf,count,datatype,dest,tag,comm,request) \
560:      (MPIUNI_ARG(buf),\
561:       MPIUNI_ARG(count),\
562:       MPIUNI_ARG(datatype),\
563:       MPIUNI_ARG(dest),\
564:       MPIUNI_ARG(tag),\
565:       MPIUNI_ARG(comm),\
566:       MPIUNI_ARG(request),\
567:       MPI_SUCCESS)
568: #define MPI_Ssend_init(buf,count,datatype,dest,tag,comm,request) \
569:      (MPIUNI_ARG(buf),\
570:       MPIUNI_ARG(count),\
571:       MPIUNI_ARG(datatype),\
572:       MPIUNI_ARG(dest),\
573:       MPIUNI_ARG(tag),\
574:       MPIUNI_ARG(comm),\
575:       MPIUNI_ARG(request),\
576:       MPI_SUCCESS)
586: #define MPI_Rsend_init(buf,count,datatype,dest,tag,comm,request) \
587:      (MPIUNI_ARG(buf),\
588:       MPIUNI_ARG(count),\
589:       MPIUNI_ARG(datatype),\
590:       MPIUNI_ARG(dest),\
591:       MPIUNI_ARG(tag),\
592:       MPIUNI_ARG(comm),\
593:       MPIUNI_ARG(request),\
594:       MPI_SUCCESS)
595: #define MPI_Recv_init(buf,count,datatype,source,tag,comm,request) \
596:      (MPIUNI_ARG(buf),\
597:       MPIUNI_ARG(count),\
598:       MPIUNI_ARG(datatype),\
599:       MPIUNI_ARG(source),\
600:       MPIUNI_ARG(tag),\
601:       MPIUNI_ARG(comm),\
602:       MPIUNI_ARG(request),\
603:       MPI_SUCCESS)
604: #define MPI_Start(request) \
605:      (MPIUNI_ARG(request),\
606:       MPI_SUCCESS)
607: #define MPI_Startall(count,array_of_requests) \
608:      (MPIUNI_ARG(count),\
609:       MPIUNI_ARG(array_of_requests),\
610:       MPI_SUCCESS)
611: #define MPI_Sendrecv(sendbuf,sendcount,sendtype,\
612:                      dest,sendtag,recvbuf,recvcount,\
613:                      recvtype,source,recvtag,\
614:                      comm,status) \
615:      (MPIUNI_ARG(dest),\
616:       MPIUNI_ARG(sendtag),\
617:       MPIUNI_ARG(recvcount),\
618:       MPIUNI_ARG(recvtype),\
619:       MPIUNI_ARG(source),\
620:       MPIUNI_ARG(recvtag),\
621:       MPIUNI_ARG(comm),\
622:       MPIUNI_ARG(status),\
623:       MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
624: #define MPI_Sendrecv_replace(buf,count,datatype,dest,sendtag,\
625:                              source,recvtag,comm,status) \
626:      (MPIUNI_ARG(buf),\
627:       MPIUNI_ARG(count),\
628:       MPIUNI_ARG(datatype),\
629:       MPIUNI_ARG(dest),\
630:       MPIUNI_ARG(sendtag),\
631:       MPIUNI_ARG(source),\
632:       MPIUNI_ARG(recvtag),\
633:       MPIUNI_ARG(comm),\
634:       MPIUNI_ARG(status),\
635:       MPI_SUCCESS)

637: #define MPI_COMBINER_NAMED      0
638: #define MPI_COMBINER_DUP        1
639: #define MPI_COMBINER_CONTIGUOUS 2
640:   /* 32-bit packing scheme: [combiner:4 | type-index:8 | count:12 | base-bytes:8] */
641: #define MPI_Type_dup(oldtype,newtype) \
642:      (*(newtype) = oldtype, MPI_SUCCESS)
643: #define MPI_Type_contiguous(count,oldtype,newtype) \
644:      (*(newtype) = (MPI_COMBINER_CONTIGUOUS<<28)|((oldtype)&0x0ff00000)|(((oldtype)>>8&0xfff)*(count))<<8|((oldtype)&0xff), MPI_SUCCESS)
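
/*
   A worked example of the packing above, again assuming sizeof(double) == 8:
   MPI_Type_contiguous(3,MPI_DOUBLE,&newtype) sets newtype to
   (MPI_COMBINER_CONTIGUOUS<<28) | (1<<20) | (3<<8) | 8, so
   MPI_sizeof(newtype) evaluates to 3*8 = 24 and the combiner can be recovered
   from the top four bits.
*/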
645: #define MPI_Type_vector(count,blocklength,stride,oldtype,newtype) \
646:      (MPIUNI_ARG(count),\
647:       MPIUNI_ARG(blocklength),\
648:       MPIUNI_ARG(stride),\
649:       MPIUNI_ARG(oldtype),\
650:       MPIUNI_ARG(newtype),\
651:       MPIUni_Abort(MPI_COMM_WORLD,0))
652: #define MPI_Type_hvector(count,blocklength,stride,oldtype,newtype) \
653:      (MPIUNI_ARG(count),\
654:       MPIUNI_ARG(blocklength),\
655:       MPIUNI_ARG(stride),\
656:       MPIUNI_ARG(oldtype),\
657:       MPIUNI_ARG(newtype),\
658:       MPIUni_Abort(MPI_COMM_WORLD,0))
659: #define MPI_Type_indexed(count,array_of_blocklengths,array_of_displacements,oldtype,newtype) \
660:      (MPIUNI_ARG(count),\
661:       MPIUNI_ARG(array_of_blocklengths),\
662:       MPIUNI_ARG(array_of_displacements),\
663:       MPIUNI_ARG(oldtype),\
664:       MPIUNI_ARG(newtype),\
665:       MPIUni_Abort(MPI_COMM_WORLD,0))
666: #define MPI_Type_hindexed(count,array_of_blocklengths,array_of_displacements,oldtype,newtype) \
667:      (MPIUNI_ARG(count),\
668:       MPIUNI_ARG(array_of_blocklengths),\
669:       MPIUNI_ARG(array_of_displacements),\
670:       MPIUNI_ARG(oldtype),\
671:       MPIUNI_ARG(newtype),\
672:       MPIUni_Abort(MPI_COMM_WORLD,0))
673: #define MPI_Type_struct(count,array_of_blocklengths,array_of_displacements,array_of_types,newtype) \
674:      (MPIUNI_ARG(count),\
675:       MPIUNI_ARG(array_of_blocklengths),\
676:       MPIUNI_ARG(array_of_displacements),\
677:       MPIUNI_ARG(array_of_types),\
678:       MPIUNI_ARG(newtype),\
679:       MPIUni_Abort(MPI_COMM_WORLD,0))
680: #define MPI_Address(location,address) \
681:      (*(address) = (MPI_Aint)((char *)(location)), MPI_SUCCESS)
682: #define MPI_Type_size(datatype,size) (*(size) = MPI_sizeof((datatype)), MPI_SUCCESS)
683: #define MPI_Type_lb(datatype,lb) (MPIUNI_ARG(datatype), *(lb) = 0, MPI_SUCCESS)
684: #define MPI_Type_ub(datatype,ub) (*(ub) = MPI_sizeof((datatype)), MPI_SUCCESS)
685: #define MPI_Type_extent(datatype,extent) \
686:      (*(extent) = MPI_sizeof((datatype)), MPI_SUCCESS)
687: #define MPI_Type_get_extent(datatype,lb,extent) \
688:      (*(lb) = 0, *(extent) = MPI_sizeof((datatype)), MPI_SUCCESS)
689: #define MPI_Type_commit(datatype) (MPIUNI_ARG(datatype), MPI_SUCCESS)
690: #define MPI_Type_free(datatype) (*(datatype) = MPI_DATATYPE_NULL, MPI_SUCCESS)
691: #define MPI_Get_elements(status,datatype,count) \
692:      (MPIUNI_ARG(status),\
693:       MPIUNI_ARG(datatype),\
694:       MPIUNI_ARG(count),\
695:       MPIUni_Abort(MPI_COMM_WORLD,0))
696: #define MPI_Pack(inbuf,incount,datatype,outbuf,outsize,position,comm) \
697:      (MPIUNI_ARG(inbuf),\
698:       MPIUNI_ARG(incount),\
699:       MPIUNI_ARG(datatype),\
700:       MPIUNI_ARG(outbuf),\
701:       MPIUNI_ARG(outsize),\
702:       MPIUNI_ARG(position),\
703:       MPIUNI_ARG(comm),\
704:       MPIUni_Abort(MPI_COMM_WORLD,0))
705: #define MPI_Unpack(inbuf,insize,position,outbuf,outcount,datatype,comm) \
706:      (MPIUNI_ARG(inbuf),\
707:       MPIUNI_ARG(insize),\
708:       MPIUNI_ARG(position),\
709:       MPIUNI_ARG(outbuf),\
710:       MPIUNI_ARG(outcount),\
711:       MPIUNI_ARG(datatype),\
712:       MPIUNI_ARG(comm),\
713:       MPIUni_Abort(MPI_COMM_WORLD,0))
714: #define MPI_Pack_size(incount,datatype,comm,size) \
715:      (MPIUNI_ARG(incount),\
716:       MPIUNI_ARG(datatype),\
717:       MPIUNI_ARG(comm),\
718:       MPIUNI_ARG(size),\
719:       MPIUni_Abort(MPI_COMM_WORLD,0))
720: #define MPI_Barrier(comm) \
721:      (MPIUNI_ARG(comm),\
722:       MPI_SUCCESS)
723: #define MPI_Bcast(buffer,count,datatype,root,comm) \
724:      (MPIUNI_ARG(buffer),\
725:       MPIUNI_ARG(count),\
726:       MPIUNI_ARG(datatype),\
727:       MPIUNI_ARG(root),\
728:       MPIUNI_ARG(comm),\
729:       MPI_SUCCESS)
730: #define MPI_Gather(sendbuf,sendcount,sendtype,\
731:                    recvbuf,recvcount, recvtype,\
732:                    root,comm) \
733:      (MPIUNI_ARG(recvcount),\
734:       MPIUNI_ARG(root),\
735:       MPIUNI_ARG(recvtype),\
736:       MPIUNI_ARG(comm),\
737:       MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
738: #define MPI_Gatherv(sendbuf,sendcount,sendtype,\
739:                     recvbuf,recvcounts,displs,\
740:                     recvtype,root,comm) \
741:      (MPIUNI_ARG(recvcounts),\
742:       MPIUNI_ARG(displs),\
743:       MPIUNI_ARG(recvtype),\
744:       MPIUNI_ARG(root),\
745:       MPIUNI_ARG(comm),\
746:       MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
747: #define MPI_Scatter(sendbuf,sendcount,sendtype,\
748:                     recvbuf,recvcount,recvtype,\
749:                     root,comm) \
750:      (MPIUNI_ARG(sendcount),\
751:       MPIUNI_ARG(sendtype),\
752:       MPIUNI_ARG(recvbuf),\
753:       MPIUNI_ARG(recvtype),\
754:       MPIUNI_ARG(root),\
755:       MPIUNI_ARG(comm),\
756:       MPIUNI_Memcpy(recvbuf,sendbuf,(recvcount)*MPI_sizeof(recvtype)))
757: #define MPI_Scatterv(sendbuf,sendcounts,displs,\
758:                      sendtype,recvbuf,recvcount,\
759:                      recvtype,root,comm) \
760:      (MPIUNI_ARG(displs),\
761:       MPIUNI_ARG(sendtype),\
762:       MPIUNI_ARG(sendcounts),\
763:       MPIUNI_ARG(root),\
764:       MPIUNI_ARG(comm),\
765:       MPIUNI_Memcpy(recvbuf,sendbuf,(recvcount)*MPI_sizeof(recvtype)))
766: #define MPI_Allgather(sendbuf,sendcount,sendtype,\
767:                      recvbuf,recvcount,recvtype,comm) \
768:      (MPIUNI_ARG(recvcount),\
769:       MPIUNI_ARG(recvtype),\
770:       MPIUNI_ARG(comm),\
771:       MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
772: #define MPI_Allgatherv(sendbuf,sendcount,sendtype,\
773:      recvbuf,recvcounts,displs,recvtype,comm) \
774:      (MPIUNI_ARG(recvcounts),\
775:       MPIUNI_ARG(displs),\
776:       MPIUNI_ARG(recvtype),\
777:       MPIUNI_ARG(comm),\
778:       MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
779: #define MPI_Alltoall(sendbuf,sendcount,sendtype,\
780:                      recvbuf,recvcount,recvtype,comm) \
781:      (MPIUNI_ARG(recvcount),\
782:       MPIUNI_ARG(recvtype),\
783:       MPIUNI_ARG(comm),\
784:       MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
785: #define MPI_Alltoallv(sendbuf,sendcounts,sdispls,sendtype,\
786:                       recvbuf,recvcounts,rdispls,recvtype,comm) \
787:      (MPIUNI_ARG(sendbuf),\
788:       MPIUNI_ARG(sendcounts),\
789:       MPIUNI_ARG(sdispls),\
790:       MPIUNI_ARG(sendtype),\
791:       MPIUNI_ARG(recvbuf),\
792:       MPIUNI_ARG(recvcounts),\
793:       MPIUNI_ARG(rdispls),\
794:       MPIUNI_ARG(recvtype),\
795:       MPIUNI_ARG(comm),\
796:       MPIUni_Abort(MPI_COMM_WORLD,0))
797: #define MPI_Alltoallw(sendbuf,sendcounts,sdispls,sendtypes,\
798:                       recvbuf,recvcounts,rdispls,recvtypes,comm) \
799:      (MPIUNI_ARG(sendbuf),\
800:       MPIUNI_ARG(sendcounts),\
801:       MPIUNI_ARG(sdispls),\
802:       MPIUNI_ARG(sendtypes),\
803:       MPIUNI_ARG(recvbuf),\
804:       MPIUNI_ARG(recvcounts),\
805:       MPIUNI_ARG(rdispls),\
806:       MPIUNI_ARG(recvtypes),\
807:       MPIUNI_ARG(comm),\
808:       MPIUni_Abort(MPI_COMM_WORLD,0))
809: #define MPI_Reduce(sendbuf,recvbuf,count,datatype,op,root,comm) \
810:      (MPIUNI_ARG(op),\
811:       MPIUNI_ARG(root),\
812:       MPIUNI_ARG(comm),\
813:       MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)))
814: #define MPI_Allreduce(sendbuf, recvbuf,count,datatype,op,comm) \
815:      (MPIUNI_ARG(op),\
816:       MPIUNI_ARG(comm),\
817:       MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)))
818: #define MPI_Scan(sendbuf, recvbuf,count,datatype,op,comm) \
819:      (MPIUNI_ARG(op),\
820:       MPIUNI_ARG(comm),\
821:       MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)))
822: #define MPI_Exscan(sendbuf,recvbuf,count,datatype,op,comm) \
823:      (MPIUNI_ARG(sendbuf),\
824:       MPIUNI_ARG(recvbuf),\
825:       MPIUNI_ARG(count),\
826:       MPIUNI_ARG(datatype),\
827:       MPIUNI_ARG(op),\
828:       MPIUNI_ARG(comm),\
829:       MPI_SUCCESS)
830: #define MPI_Reduce_scatter(sendbuf,recvbuf,recvcounts,datatype,op,comm) \
831:      (MPIUNI_ARG(sendbuf),\
832:       MPIUNI_ARG(recvbuf),\
833:       MPIUNI_ARG(recvcounts),\
834:       MPIUNI_ARG(datatype),\
835:       MPIUNI_ARG(op),\
836:       MPIUNI_ARG(comm),\
837:       MPIUni_Abort(MPI_COMM_WORLD,0))
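
/*
   On a single process most of the reduction macros above degenerate to a
   buffer copy. A small sketch (variable names are only illustrative):

     int local = 42, global;
     MPI_Allreduce(&local,&global,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);

   expands to MPIUNI_Memcpy(&global,&local,1*MPI_sizeof(MPI_INT)), leaving
   global == 42. MPI_Reduce_scatter(), however, aborts (see above).
*/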

839: #define MPI_Op_create(function,commute,op) \
840:      (MPIUNI_ARG(function),\
841:       MPIUNI_ARG(commute),\
842:       MPIUNI_ARG(op),\
843:       MPI_SUCCESS)
844: #define MPI_Op_free(op) \
845:      (*(op) = MPI_OP_NULL, MPI_SUCCESS)

847: #define MPI_Group_size(group,size) \
848:   (MPIUNI_ARG(group),\
849:    *(size)=1,\
850:    MPI_SUCCESS)
851: #define MPI_Group_rank(group,rank) \
852:   (MPIUNI_ARG(group),\
853:    *(rank)=0,\
854:    MPI_SUCCESS)
855: #define MPI_Group_translate_ranks(group1,n,ranks1,group2,ranks2) \
856:      (MPIUNI_ARG(group1),\
857:       MPIUNI_ARG(group2),\
858:       MPIUNI_Memcpy((ranks2),(ranks1),(n)*sizeof(int)))
859: #define MPI_Group_compare(group1,group2,result) \
860:     (MPIUNI_ARG(group1),\
861:      MPIUNI_ARG(group2),\
862:      *(result)=1,\
863:      MPI_SUCCESS)
864: #define MPI_Group_union(group1,group2,newgroup) MPI_SUCCESS
865: #define MPI_Group_intersection(group1,group2,newgroup) MPI_SUCCESS
866: #define MPI_Group_difference(group1,group2,newgroup) MPI_SUCCESS
867: #define MPI_Group_excl(group,n,ranks,newgroup) MPI_SUCCESS
868: #define MPI_Group_range_incl(group,n,ranges,newgroup) MPI_SUCCESS
869: #define MPI_Group_range_excl(group,n,ranges,newgroup) MPI_SUCCESS
870: #define MPI_Group_free(group) \
871:      (*(group) = MPI_GROUP_NULL, MPI_SUCCESS)

873: #define MPI_Comm_compare(comm1,comm2,result) \
874:      (MPIUNI_ARG(comm1),\
875:       MPIUNI_ARG(comm2),\
876:       *(result)=MPI_IDENT,\
877:       MPI_SUCCESS)
878: #define MPI_Comm_split(comm,color,key,newcomm) \
879:      (MPIUNI_ARG(color),\
880:       MPIUNI_ARG(key),\
881:       MPI_Comm_dup(comm,newcomm))
882: #define MPI_Comm_test_inter(comm,flag) (*(flag)=1, MPI_SUCCESS)
883: #define MPI_Comm_remote_size(comm,size) (*(size)=1, MPI_SUCCESS)
884: #define MPI_Comm_remote_group(comm,group) MPI_SUCCESS
885: #define MPI_Intercomm_create(local_comm,local_leader,peer_comm,\
886:      remote_leader,tag,newintercomm) MPI_SUCCESS
887: #define MPI_Intercomm_merge(intercomm,high,newintracomm) MPI_SUCCESS
888: #define MPI_Topo_test(comm,flag) MPI_SUCCESS
889: #define MPI_Cart_create(comm_old,ndims,dims,periods,\
890:      reorder,comm_cart) MPIUni_Abort(MPI_COMM_WORLD,0)
891: #define MPI_Dims_create(nnodes,ndims,dims) MPIUni_Abort(MPI_COMM_WORLD,0)
892: #define MPI_Graph_create(comm,a,b,c,d,e) MPIUni_Abort(MPI_COMM_WORLD,0)
893: #define MPI_Graphdims_get(comm,nnodes,nedges) MPIUni_Abort(MPI_COMM_WORLD,0)
894: #define MPI_Graph_get(comm,a,b,c,d) MPIUni_Abort(MPI_COMM_WORLD,0)
895: #define MPI_Cartdim_get(comm,ndims) MPIUni_Abort(MPI_COMM_WORLD,0)
896: #define MPI_Cart_get(comm,maxdims,dims,periods,coords) \
897:      MPIUni_Abort(MPI_COMM_WORLD,0)
898: #define MPI_Cart_rank(comm,coords,rank) MPIUni_Abort(MPI_COMM_WORLD,0)
899: #define MPI_Cart_coords(comm,rank,maxdims,coords) \
900:      MPIUni_Abort(MPI_COMM_WORLD,0)
901: #define MPI_Graph_neighbors_count(comm,rank,nneighbors) \
902:      MPIUni_Abort(MPI_COMM_WORLD,0)
903: #define MPI_Graph_neighbors(comm,rank,maxneighbors,neighbors) \
904:      MPIUni_Abort(MPI_COMM_WORLD,0)
905: #define MPI_Cart_shift(comm,direction,disp,rank_source,rank_dest) \
906:      MPIUni_Abort(MPI_COMM_WORLD,0)
907: #define MPI_Cart_sub(comm,remain_dims,newcomm) MPIUni_Abort(MPI_COMM_WORLD,0)
908: #define MPI_Cart_map(comm,ndims,dims,periods,newrank) MPIUni_Abort(MPI_COMM_WORLD,0)
909: #define MPI_Graph_map(comm,a,b,c,d) MPIUni_Abort(MPI_COMM_WORLD,0)

911: #define MPI_Get_processor_name(name,result_len)                         \
912:      (*(result_len) = 9,MPIUNI_Memcpy(name,"localhost",10*sizeof(char)))
913: #define MPI_Errhandler_create(function,errhandler) \
914:      (MPIUNI_ARG(function),\
915:       *(errhandler) = MPI_ERRORS_RETURN,\
916:       MPI_SUCCESS)
917: #define MPI_Errhandler_set(comm,errhandler) \
918:      (MPIUNI_ARG(comm),\
919:       MPIUNI_ARG(errhandler),\
920:       MPI_SUCCESS)
921: #define MPI_Errhandler_get(comm,errhandler) \
922:      (MPIUNI_ARG(comm),\
923:       (*errhandler) = MPI_ERRORS_RETURN,\
924:       MPI_SUCCESS)
925: #define MPI_Errhandler_free(errhandler) \
926:      (*(errhandler) = MPI_ERRHANDLER_NULL,\
927:       MPI_SUCCESS)
928: #define MPI_Error_string(errorcode,string,result_len) \
929:      (MPIUNI_ARG(errorcode),\
930:       *(result_len) = 9,\
931:       MPIUNI_Memcpy(string,"MPI error",10*sizeof(char)))
932: #define MPI_Error_class(errorcode,errorclass) \
933:      (*(errorclass) = errorcode, MPI_SUCCESS)
934: #define MPI_Wtick() 1.0
935: #define MPI_Pcontrol(level) MPI_SUCCESS

937: /* MPI-IO additions */

939: typedef int MPI_File;
940: #define MPI_FILE_NULL 0

942: typedef int MPI_Offset;

944: #define MPI_MODE_RDONLY  0
945: #define MPI_MODE_WRONLY  0
946: #define MPI_MODE_CREATE  0

948: #define MPI_File_open(comm,filename,amode,info,mpi_fh) \
949:   (MPIUNI_ARG(comm),\
950:    MPIUNI_ARG(filename),\
951:    MPIUNI_ARG(amode),\
952:    MPIUNI_ARG(info),\
953:    MPIUNI_ARG(mpi_fh),\
954:    MPIUni_Abort(MPI_COMM_WORLD,0))

956: #define MPI_File_close(mpi_fh) \
957:   (MPIUNI_ARG(mpi_fh),\
958:    MPIUni_Abort(MPI_COMM_WORLD,0))

960: #define MPI_File_set_view(mpi_fh,disp,etype,filetype,datarep,info) \
961:   (MPIUNI_ARG(mpi_fh),\
962:    MPIUNI_ARG(disp),\
963:    MPIUNI_ARG(etype),\
964:    MPIUNI_ARG(filetype),\
965:    MPIUNI_ARG(datarep),\
966:    MPIUNI_ARG(info),\
967:    MPIUni_Abort(MPI_COMM_WORLD,0))

969: #define MPI_File_write_all(mpi_fh,buf,count,datatype,status) \
970:   (MPIUNI_ARG(mpi_fh),\
971:    MPIUNI_ARG(buf),\
972:    MPIUNI_ARG(count),\
973:    MPIUNI_ARG(datatype),\
974:    MPIUNI_ARG(status),\
975:    MPIUni_Abort(MPI_COMM_WORLD,0))

977: #define MPI_File_read_all(mpi_fh,buf,count,datatype,status) \
978:   (MPIUNI_ARG(mpi_fh),\
979:    MPIUNI_ARG(buf),\
980:    MPIUNI_ARG(count),\
981:    MPIUNI_ARG(datatype),\
982:    MPIUNI_ARG(status),\
983:    MPIUni_Abort(MPI_COMM_WORLD,0))

985:   /* called from PetscInitialize() - so return success */
986: #define MPI_Register_datarep(name,read_conv_fn,write_conv_fn,extent_fn,state) \
987:   (MPIUNI_ARG(name),\
988:    MPIUNI_ARG(read_conv_fn),\
989:    MPIUNI_ARG(write_conv_fn),\
990:    MPIUNI_ARG(extent_fn),\
991:    MPIUNI_ARG(state),\
992:    MPI_SUCCESS)

994: #define MPI_Type_create_subarray(ndims,array_of_sizes,array_of_subsizes,array_of_starts,order,oldtype,newtype) \
995:   (MPIUNI_ARG(ndims),\
996:    MPIUNI_ARG(array_of_sizes),\
997:    MPIUNI_ARG(array_of_subsizes),\
998:    MPIUNI_ARG(array_of_starts),\
999:    MPIUNI_ARG(order),\
1000:    MPIUNI_ARG(oldtype),\
1001:    MPIUNI_ARG(newtype),\
1002:    MPIUni_Abort(MPI_COMM_WORLD,0))

1004: #define MPI_Type_create_resized(oldtype,lb,extent,newtype) \
1005:   (MPIUNI_ARG(oldtype),\
1006:    MPIUNI_ARG(lb),\
1007:    MPIUNI_ARG(extent),\
1008:    MPIUNI_ARG(newtype),\
1009:    MPIUni_Abort(MPI_COMM_WORLD,0))

1011: #if defined(__cplusplus)
1012: }
1013: #endif
1014: #endif