/*
 * Copyright (C) by Argonne National Laboratory
 * See COPYRIGHT in top-level directory
 */

#ifndef MPIR_OBJECTS_H_INCLUDED
#define MPIR_OBJECTS_H_INCLUDED

#include "mpichconf.h"

/*TDSOverview.tex

  MPI has a number of data structures, most of which are represented by
  an opaque handle in an MPI program.  In the MPICH implementation of MPI,
  these handles are represented as integers; this makes implementation of
  the C/Fortran handle transfer calls (part of MPI-2) easy.

  MPIR objects are allocated by a common set of object allocation
  functions, where 'objmem' is a pointer to a memory allocation object
  that knows enough to allocate objects, including the size of the object
  and the location of preallocated memory, as well as the type of memory
  allocator.  By providing the routines to allocate and free the memory,
  we make it easy to use the same interface to allocate both local and
  shared memory for objects (always using the same kind for each type of
  object).

  The names create/destroy were chosen because they are different from
  new/delete (C++ operations) and malloc/free.
  Any name choice will have some conflicts with other uses, of course.

  Reference Counts:
  Many MPI objects have reference count semantics.
  The semantics of MPI require that many objects that have been freed by
  the user (e.g., with 'MPI_Type_free' or 'MPI_Comm_free') remain valid
  until all pending references to that object (e.g., by an 'MPI_Irecv')
  are complete.  There are several ways to implement this; MPICH uses
  `reference counts` in the objects.  To support the 'MPI_THREAD_MULTIPLE'
  level of thread-safety, these reference counts must be accessed and
  updated atomically.
  A reference count for `any` object can be incremented (atomically)
  with 'MPIR_Object_add_ref(objptr)'
  and decremented with 'MPIR_Object_release_ref(objptr,inuse_ptr)'.
  These have been designed so that they can be implemented as inlined
  macros rather than function calls, even in the multithreaded case, and
  can use special processor instructions that guarantee atomicity to
  avoid thread locks.
  The decrement routine sets the value pointed at by 'inuse_ptr' to 0 if
  the post-decrement value of the reference counter is zero, and to a
  non-zero value otherwise.  If this value is zero, then the routine that
  decremented the reference count should free the object.  This may be as
  simple as calling 'destroy' (for simple objects with no other allocated
  storage) or may require calling a separate routine to destroy the object.
  Because MPI uses 'MPI_xxx_free' to both decrement the reference count and
  free the object if the reference count is zero, we avoid the use of
  'free' in the MPIR destruction routines.

  The 'inuse_ptr' approach is used rather than requiring the post-decrement
  value because, for reference-count semantics, all that is necessary is
  to know when the reference count reaches zero, and this can sometimes
  be implemented more cheaply than requiring the post-decrement value
  (e.g., on IA32, there is an instruction for this operation).

  Question:
  Should we state that this is a macro so that we can use a register for
  the output value?  That avoids a store.  Alternately, have the macro
  return the value as if it was a function?

  Structure Definitions:
  The structure definitions in this document define `only` that part of
  a structure that may be used by code that is making use of the ADI.
  Thus, some structures, such as 'MPIR_Comm', have many defined fields;
  these are used to support MPI routines such as 'MPI_Comm_size' and
  'MPI_Comm_remote_group'.  Other structures may have few or no defined
  members; these structures have no fields used outside of the ADI.
  In C++ terms, all members of these structures are 'private'.

  For the initial implementation, we expect that the structure definitions
  will be designed for the multimethod device.  However, all items that are
  specific to a particular device (including the multi-method device)
  will be placed at the end of the structure;
  the document will clearly identify the members that all implementations
  will provide.  This simplifies much of the code in both the ADI and the
  implementation of the MPI routines because structure members can be
  directly accessed rather than using some macro or C++ style method
  interface.

 T*/

/*TOpaqOverview.tex
  MPI Opaque Objects:

  MPI Opaque objects such as 'MPI_Comm' or 'MPI_Datatype' are specified by
  integers (in the MPICH implementation); the MPI standard calls these
  handles.
  Out of range values are invalid; the value 0 is reserved.
  For most (with the possible exception of
  'MPI_Request' for performance reasons) MPI Opaque objects, the integer
  encodes both the kind of object (allowing runtime tests to detect a
  datatype passed where a communicator is expected) and important
  properties of the object.  Even the 'MPI_xxx_NULL' values should be
  encoded so that different null handles can be distinguished.  The
  details of the encoding of the handles are covered in more detail in
  the MPICH Design Document.
  For the most part, the ADI uses pointers to the underlying structures
  rather than the handles themselves.  However, each structure contains a
  'handle' field that is the corresponding integer handle for the MPI
  object.

  MPIR objects are not opaque.

 T*/

/* Known MPI object types.  These are used for both the error handlers
   and for the handles.  This is a 4 bit value.  0 is reserved so
   that all-zero handles can be flagged as an error.
*/
/*E
  MPII_Object_kind - Object kind (communicator, window, or file)

  Notes:
  This enum is used by keyvals and errhandlers to indicate the type of
  MPI opaque object for which the data is valid.  These are defined as
  bits to allow future expansion to the case where an object is valid for
  multiple types (for example, we may want a universal error handler for
  error returns).  This is also used to indicate the type of MPI object
  an MPI handle represents.  It is an enum because it applies only to
  the MPI and internal MPICH objects.

  'MPIR_VCONN' is a virtual connection; while this is not part of the
  overall ADI3 design, an object that manages connections to other
  processes is a common need, and 'MPIR_VCONN' may be used for that.

  Module:
  Attribute-DS
  E*/
typedef enum MPII_Object_kind {
    MPIR_COMM = 0x1,
    MPIR_GROUP = 0x2,
    MPIR_DATATYPE = 0x3,
    MPIR_FILE = 0x4,    /* only used obliquely inside MPIR_Errhandler objs */
    MPIR_ERRHANDLER = 0x5,
    MPIR_OP = 0x6,
    MPIR_INFO = 0x7,
    MPIR_WIN = 0x8,
    MPIR_KEYVAL = 0x9,
    MPIR_ATTR = 0xa,
    MPIR_REQUEST = 0xb,
    MPIR_VCONN = 0xc,
    MPIR_GREQ_CLASS = 0xd,
    MPIR_INTERNAL = 0xe,        /* used for various MPICH internal objects that
                                 * do not require a handle */
} MPII_Object_kind;


/* The object kind occupies the 4 bits starting at bit 26 of a handle
   (mask 0x3c000000). */
#define HANDLE_MPI_KIND_SHIFT 26
#define HANDLE_GET_MPI_KIND(a) (((a)&0x3c000000) >> HANDLE_MPI_KIND_SHIFT)
/* NOTE(review): the kind is OR-ed in, not masked/replaced -- this assumes
   the kind bits of 'a' are zero on entry; confirm against callers. */
#define HANDLE_SET_MPI_KIND(a,kind) ((a) | ((kind) << HANDLE_MPI_KIND_SHIFT))

/* returns the name of the handle kind for debugging/logging purposes */
const char *MPIR_Handle_get_kind_str(int kind);
/* Handle types.  These are really 2 bits (bits 30-31 of the handle). */
#define HANDLE_KIND_INVALID  0x0
#define HANDLE_KIND_BUILTIN  0x1
#define HANDLE_KIND_DIRECT   0x2
#define HANDLE_KIND_INDIRECT 0x3
/* Mask assumes that ints are at least 4 bytes */
#define HANDLE_KIND_MASK 0xc0000000
#define HANDLE_KIND_SHIFT 30
#define HANDLE_GET_KIND(a) (((unsigned)(a)&HANDLE_KIND_MASK)>>HANDLE_KIND_SHIFT)
/* Do the shift in unsigned arithmetic: left-shifting a nonzero kind into
 * bits 30-31 of a signed int is undefined behavior (C11 6.5.7).  The cast
 * back to int preserves the two's-complement bit pattern that callers
 * store in the integer handle, so the produced value is unchanged. */
#define HANDLE_SET_KIND(a,kind) \
    ((int)((unsigned)(a)|((unsigned)(kind)<<HANDLE_KIND_SHIFT)))
#define HANDLE_IS_BUILTIN(a) (HANDLE_GET_KIND((a)) == HANDLE_KIND_BUILTIN)

/* For indirect, the remainder of the handle has a block and index within
 * that block */
#define HANDLE_INDIRECT_SHIFT 12
#define HANDLE_BLOCK(a) (((a)& 0x03FFF000) >> HANDLE_INDIRECT_SHIFT)
#define HANDLE_BLOCK_INDEX(a) ((a) & 0x00000FFF)

/* Number of blocks is between 1 and 16384 */
#if defined MPID_HANDLE_NUM_BLOCKS
#define HANDLE_NUM_BLOCKS MPID_HANDLE_NUM_BLOCKS
#else
#define HANDLE_NUM_BLOCKS 8192
#endif /* MPID_HANDLE_NUM_BLOCKS */

/* Number of objects in a block is between 1 and 4096 (each obj has an
 * index within its block) */
#if defined MPID_HANDLE_NUM_INDICES
#define HANDLE_NUM_INDICES MPID_HANDLE_NUM_INDICES
#else
#define HANDLE_NUM_INDICES 1024
#endif /* MPID_HANDLE_NUM_INDICES */

/* For direct, the remainder of the handle is the index into a predefined
   block */
#define HANDLE_MASK 0x03FFFFFF
#define HANDLE_INDEX(a) ((a)& HANDLE_MASK)

#if defined (MPL_USE_DBG_LOGGING)
extern MPL_dbg_class MPIR_DBG_HANDLE;
#endif /* MPL_USE_DBG_LOGGING */

/* ------------------------------------------------------------------------- */
/* reference counting macros */
/* If we're debugging the handles (including reference counts),
   add an additional test.  The check on a max refcount helps to
   detect objects whose refcounts are not decremented as many times
   as they are incremented */
#ifdef MPICH_DEBUG_HANDLES
#define MPICH_DEBUG_MAX_REFCOUNT 64
#define HANDLE_CHECK_REFCOUNT(objptr_,local_ref_count_,op_)                       \
    do {                                                                          \
        if (local_ref_count_ > MPICH_DEBUG_MAX_REFCOUNT || local_ref_count_ < 0)  \
        {                                                                         \
            MPL_DBG_MSG_FMT(MPIR_DBG_HANDLE,TYPICAL,(MPL_DBG_FDEST,               \
                                 "Invalid refcount (%d) in %p (0x%08x) %s",       \
                                 local_ref_count_, (objptr_), (objptr_)->handle, op_)); \
        }                                                                         \
        MPIR_Assert(local_ref_count_ >= 0);                                       \
    } while (0)
#else
/* Non-debug variant: only assert that the refcount never went negative. */
#define HANDLE_CHECK_REFCOUNT(objptr_,local_ref_count_,op_) \
    MPIR_Assert(local_ref_count_ >= 0)
#endif

/* Log a refcount transition for an object (no-op unless dbg logging is
 * enabled -- see MPL_DBG_MSG_FMT). */
#define HANDLE_LOG_REFCOUNT_CHANGE(objptr_, new_refcount_, action_str_)             \
    MPL_DBG_MSG_FMT(MPIR_DBG_HANDLE,TYPICAL,(MPL_DBG_FDEST,                         \
                                             "%s %p (0x%08x kind=%s) refcount to %d", \
                                             (action_str_),                         \
                                             (objptr_),                             \
                                             (objptr_)->handle,                     \
                                             MPIR_Handle_get_kind_str(HANDLE_GET_MPI_KIND((objptr_)->handle)), \
                                             new_refcount_))

/* The "_always" versions of these macros unconditionally manipulate the
 * reference count of the given object.  They exist to permit an optimization
 * of not reference counting predefined objects. */

/* The MPL_DBG... statements are macros that vanish unless
   --enable-g=log is selected. */
/* HANDLE_CHECK_REFCOUNT is defined above, and adds an additional sanity
   check for the refcounts */
#if MPICH_THREAD_REFCOUNT == MPICH_REFCOUNT__NONE

/* No thread-safety required: a plain int holds the reference count. */
typedef int Handle_ref_count;

#define MPIR_Object_set_ref(objptr_,val)                 \
    do {                                                 \
        (objptr_)->ref_count = val;                      \
        HANDLE_LOG_REFCOUNT_CHANGE(objptr_, val, "set"); \
    } while (0)

/* must be used with care, since there is no synchronization for this read */
#define MPIR_Object_get_ref(objptr_) \
    ((objptr_)->ref_count)

#define MPIR_Object_add_ref_always(objptr_)                                \
    do {                                                                   \
        (objptr_)->ref_count++;                                            \
        HANDLE_LOG_REFCOUNT_CHANGE(objptr_, (objptr_)->ref_count, "incr"); \
        HANDLE_CHECK_REFCOUNT(objptr_,(objptr_)->ref_count,"incr");        \
    } while (0)
/* Stores the post-decrement refcount through inuse_ptr; zero means the
 * caller is responsible for destroying the object. */
#define MPIR_Object_release_ref_always(objptr_,inuse_ptr)                  \
    do {                                                                   \
        *(inuse_ptr) = --((objptr_)->ref_count);                           \
        HANDLE_LOG_REFCOUNT_CHANGE(objptr_, (objptr_)->ref_count, "decr"); \
        HANDLE_CHECK_REFCOUNT(objptr_,(objptr_)->ref_count,"decr");        \
    } while (0)

#elif MPICH_THREAD_REFCOUNT == MPICH_REFCOUNT__LOCKFREE

/* Lock-free refcounting via MPL atomic integers. */
typedef MPL_atomic_int_t Handle_ref_count;

#define MPIR_Object_set_ref(objptr_,val)                  \
    do {                                                  \
        MPL_atomic_store_int(&(objptr_)->ref_count, val); \
        HANDLE_LOG_REFCOUNT_CHANGE(objptr_, val, "set");  \
    } while (0)

/* must be used with care, since there is no synchronization for this read */
#define MPIR_Object_get_ref(objptr_) \
    (MPL_atomic_load_int(&(objptr_)->ref_count))

#ifdef MPICH_DEBUG_HANDLES
/*
   For non-debug builds, we use non-fetch atomics here, because they may be
   slightly faster than fetch versions, and we don't care about exact value
   of the refcount (other than whether it hit zero.)
   For debug builds (when MPICH_DEBUG_HANDLES is set), we need fetch atomics
   in order to know the correct refcount value when multiple threads are
   present. */
/* MPICH_THREAD_REFCOUNT == MPICH_REFCOUNT__LOCKFREE && MPICH_DEBUG_HANDLES */
#define MPIR_Object_add_ref_always(objptr_)                                  \
    do {                                                                     \
        int new_ref_;                                                        \
        new_ref_ = MPL_atomic_fetch_add_int(&((objptr_)->ref_count), 1) + 1; \
        HANDLE_LOG_REFCOUNT_CHANGE(objptr_, new_ref_, "incr");               \
        HANDLE_CHECK_REFCOUNT(objptr_,new_ref_,"incr");                      \
    } while (0)
#define MPIR_Object_release_ref_always(objptr_,inuse_ptr)                        \
    do {                                                                         \
        int new_ref_ = MPL_atomic_fetch_sub_int(&((objptr_)->ref_count), 1) - 1; \
        *(inuse_ptr) = new_ref_;                                                 \
        HANDLE_LOG_REFCOUNT_CHANGE(objptr_, new_ref_, "decr");                   \
        HANDLE_CHECK_REFCOUNT(objptr_,new_ref_,"decr");                          \
    } while (0)
#else /* MPICH_DEBUG_HANDLES */
/* MPICH_THREAD_REFCOUNT == MPICH_REFCOUNT__LOCKFREE && !MPICH_DEBUG_HANDLES */
/* NOTE(review): the comment above says the non-debug case uses non-fetch
 * atomics, but fetch_add is used here with its result discarded -- confirm
 * whether MPL provides a non-fetch increment. */
#define MPIR_Object_add_ref_always(objptr_)                   \
    do {                                                      \
        MPL_atomic_fetch_add_int(&((objptr_)->ref_count), 1); \
    } while (0)
#define MPIR_Object_release_ref_always(objptr_,inuse_ptr)                        \
    do {                                                                         \
        int new_ref_ = MPL_atomic_fetch_sub_int(&((objptr_)->ref_count), 1) - 1; \
        *(inuse_ptr) = new_ref_;                                                 \
    } while (0)
#endif /* MPICH_DEBUG_HANDLES */
#else
#error invalid value for MPICH_THREAD_REFCOUNT
#endif

/* TODO someday we should probably always suppress predefined object
 * refcounting, but we don't have total confidence in it yet.  So until we
 * gain sufficient confidence, this is a configurable option. */
#if defined(MPICH_THREAD_SUPPRESS_PREDEFINED_REFCOUNTS)

/* The assumption here is that objects with handles of type
 * HANDLE_KIND_BUILTIN will be created/destroyed only at MPI_Init/MPI_Finalize
 * time and don't need to be reference counted.  This can be a big
 * performance win on some platforms, such as BG/P. */
/* It is also assumed that any object being reference counted via these
 * macros will have a valid value in the handle field, even if it is
 * HANDLE_SET_KIND(0, HANDLE_KIND_INVALID) */
/* TODO profile and examine the assembly that is generated for this if () on
 * Blue Gene (and elsewhere).  We may need to mark it unlikely(). */
/* Increment the refcount, except for builtin (predefined) objects, which
 * are skipped under this configuration. */
#define MPIR_Object_add_ref(objptr_)                                \
    do {                                                            \
        int handle_kind_ = HANDLE_GET_KIND((objptr_)->handle);      \
        if (unlikely(handle_kind_ != HANDLE_KIND_BUILTIN)) {        \
            MPIR_Object_add_ref_always((objptr_));                  \
        }                                                           \
        else {                                                      \
            MPL_DBG_MSG_FMT(MPIR_DBG_HANDLE,TYPICAL,(MPL_DBG_FDEST, \
                "skipping add_ref on %p (0x%08x kind=%s) refcount=%d", \
                (objptr_),                                          \
                (objptr_)->handle,                                  \
                MPIR_Handle_get_kind_str(HANDLE_GET_MPI_KIND((objptr_)->handle)), \
                MPIR_Object_get_ref(objptr_)))                      \
        }                                                           \
    } while (0)
/* Decrement the refcount, except for builtin objects, which always report
 * "still in use" (*inuse_ptr_ = 1) so callers never free them. */
#define MPIR_Object_release_ref(objptr_,inuse_ptr_)                 \
    do {                                                            \
        int handle_kind_ = HANDLE_GET_KIND((objptr_)->handle);      \
        if (unlikely(handle_kind_ != HANDLE_KIND_BUILTIN)) {        \
            MPIR_Object_release_ref_always((objptr_), (inuse_ptr_)); \
        }                                                           \
        else {                                                      \
            *(inuse_ptr_) = 1;                                      \
            MPL_DBG_MSG_FMT(MPIR_DBG_HANDLE,TYPICAL,(MPL_DBG_FDEST, \
                "skipping release_ref on %p (0x%08x kind=%s) refcount=%d", \
                (objptr_),                                          \
                (objptr_)->handle,                                  \
                MPIR_Handle_get_kind_str(HANDLE_GET_MPI_KIND((objptr_)->handle)), \
                MPIR_Object_get_ref(objptr_)))                      \
        }                                                           \
    } while (0)

#else /* !defined(MPICH_THREAD_SUPPRESS_PREDEFINED_REFCOUNTS) */

/* the base case, where we just always manipulate the reference counts */
#define MPIR_Object_add_ref(objptr_) \
    MPIR_Object_add_ref_always((objptr_))
#define MPIR_Object_release_ref(objptr_,inuse_ptr_) \
    MPIR_Object_release_ref_always((objptr_),(inuse_ptr_))

#endif


/* end reference counting macros */
/* ------------------------------------------------------------------------- */

/* This macro defines structure fields that are needed in order to use the
 * reference counting and object allocation macros/functions in MPICH.  This
 * allows us to avoid casting and violating C's strict aliasing rules in most
 * cases.
 *
 * All *active* (in use) objects have the handle as the first value; objects
 * with reference counts have the reference count as the second value.  See
 * MPIR_Object_add_ref and MPIR_Object_release_ref.
 *
 * NOTE: This macro *must* be invoked as the very first element of the
 * structure! */
#define MPIR_OBJECT_HEADER \
    int handle; \
    Handle_ref_count ref_count /*semicolon intentionally omitted */

/* ALL objects have the handle as the first value. */
/* Inactive (unused and stored on the appropriate avail list) objects
   have MPIR_Handle_common as the head */
typedef struct MPIR_Handle_common {
    MPIR_OBJECT_HEADER;
    void *next;                 /* Free handles use this field to point to
                                 * the next free object */
} MPIR_Handle_common;

/* This type contains all of the data, except for the direct array,
   used by the object allocators. */
typedef struct MPIR_Object_alloc_t {
    MPIR_Handle_common *avail;  /* Next available object */
    int initialized;            /* NOTE(review): presumably nonzero once the
                                 * allocator has been set up; set outside
                                 * this header -- confirm */
    void **indirect;            /* Pointer to indirect object blocks */
    int indirect_size;          /* Number of allocated indirect blocks */
    MPII_Object_kind kind;      /* Kind of object this is for */
    int size;                   /* Size of an individual object */
    void *direct;               /* Pointer to direct block, used
                                 * for allocation */
    int direct_size;            /* Size of direct block */
    void *lock;                 /* lower-layer may register a lock to use.  This is
                                 * mostly for multipool requests.  For other objects
                                 * or not per-vci thread granularity, this lock
                                 * pointer is ignored.  Ref. mpir_request.h.
                                 * NOTE: it is `void *` because mutex type not
                                 * defined yet. */
} MPIR_Object_alloc_t;
static inline void *MPIR_Handle_obj_alloc(MPIR_Object_alloc_t *);
static inline void *MPIR_Handle_obj_alloc_unsafe(MPIR_Object_alloc_t *,
                                                 int max_blocks, int max_indices);
static inline void MPIR_Handle_obj_free(MPIR_Object_alloc_t *, void *);
static inline void MPIR_Handle_obj_free_unsafe(MPIR_Object_alloc_t *, void *);
static inline void *MPIR_Handle_get_ptr_indirect(int, MPIR_Object_alloc_t *);


/* Convert Handles to objects for MPI types that have predefined objects */
/* TODO examine generated assembly for this construct, it's probably suboptimal
 * on Blue Gene.  An if/else if/else might help the compiler out.  It also lets
 * us hint that one case is likely(), usually the BUILTIN case. */
#define MPIR_Getb_ptr(kind,KIND,a,bmsk,ptr)                               \
    {                                                                     \
        switch (HANDLE_GET_KIND(a)) {                                     \
            case HANDLE_KIND_BUILTIN:                                     \
                MPIR_Assert(((a)&(bmsk)) < MPIR_##KIND##_N_BUILTIN);      \
                ptr=MPIR_##kind##_builtin+((a)&(bmsk));                   \
                break;                                                    \
            case HANDLE_KIND_DIRECT:                                      \
                ptr=MPIR_##kind##_direct+HANDLE_INDEX(a);                 \
                break;                                                    \
            case HANDLE_KIND_INDIRECT:                                    \
                ptr=((MPIR_##kind*)                                       \
                     MPIR_Handle_get_ptr_indirect(a,&MPIR_##kind##_mem)); \
                break;                                                    \
            case HANDLE_KIND_INVALID:                                     \
            default:                                                      \
                ptr=0;                                                    \
                break;                                                    \
        }                                                                 \
    }

/* Convert handles to objects for MPI types that do _not_ have any predefined
   objects */
#define MPIR_Get_ptr(kind,a,ptr)                                          \
    {                                                                     \
        switch (HANDLE_GET_KIND(a)) {                                     \
            case HANDLE_KIND_DIRECT:                                      \
                ptr=MPIR_##kind##_direct+HANDLE_INDEX(a);                 \
                break;                                                    \
            case HANDLE_KIND_INDIRECT:                                    \
                ptr=((MPIR_##kind*)                                       \
                     MPIR_Handle_get_ptr_indirect(a,&MPIR_##kind##_mem)); \
                break;                                                    \
            case HANDLE_KIND_INVALID:                                     \
            case HANDLE_KIND_BUILTIN:                                     \
            default:                                                      \
                ptr=0;                                                    \
                break;                                                    \
        }                                                                 \
    }

/* FIXME: the masks should be defined with the handle definitions instead
   of inserted here as literals */
#define MPIR_Comm_get_ptr(a,ptr) MPIR_Getb_ptr(Comm,COMM,a,0x03ffffff,ptr)
#define MPIR_Group_get_ptr(a,ptr) MPIR_Getb_ptr(Group,GROUP,a,0x03ffffff,ptr)
#define MPIR_Errhandler_get_ptr(a,ptr) MPIR_Getb_ptr(Errhandler,ERRHANDLER,a,0x3,ptr)
#define MPIR_Op_get_ptr(a,ptr) MPIR_Getb_ptr(Op,OP,a,0x000000ff,ptr)
#define MPIR_Info_get_ptr(a,ptr) MPIR_Getb_ptr(Info,INFO,a,0x03ffffff,ptr)
#define MPIR_Win_get_ptr(a,ptr) MPIR_Get_ptr(Win,a,ptr)
/* Request objects are handled differently.  See mpir_request.h */
#define MPIR_Grequest_class_get_ptr(a,ptr) MPIR_Get_ptr(Grequest_class,a,ptr)
/* Keyvals have a special format.  This is roughly MPIR_Getb_ptr, but
   the handle index is in a smaller bit field.  In addition,
   there is no storage for the builtin keyvals.
   For the indirect case, we mask off the part of the keyval that is
   in the bits normally used for the indirect block index.
 */
#define MPII_Keyval_get_ptr(a,ptr)                                        \
    {                                                                     \
        switch (HANDLE_GET_KIND(a)) {                                     \
            case HANDLE_KIND_BUILTIN:                                     \
                ptr=0;                                                    \
                break;                                                    \
            case HANDLE_KIND_DIRECT:                                      \
                ptr=MPII_Keyval_direct+((a)&0x3fffff);                    \
                break;                                                    \
            case HANDLE_KIND_INDIRECT:                                    \
                ptr=((MPII_Keyval*)                                       \
                     MPIR_Handle_get_ptr_indirect((a)&0xfc3fffff,&MPII_Keyval_mem)); \
                break;                                                    \
            case HANDLE_KIND_INVALID:                                     \
            default:                                                      \
                ptr=0;                                                    \
                break;                                                    \
        }                                                                 \
    }

#endif /* MPIR_OBJECTS_H_INCLUDED */