# Opening modes
# -------------

MODE_RDONLY = MPI_MODE_RDONLY
#: Read only

MODE_WRONLY = MPI_MODE_WRONLY
#: Write only

MODE_RDWR = MPI_MODE_RDWR
#: Reading and writing

MODE_CREATE = MPI_MODE_CREATE
#: Create the file if it does not exist

MODE_EXCL = MPI_MODE_EXCL
#: Error if creating file that already exists

MODE_DELETE_ON_CLOSE = MPI_MODE_DELETE_ON_CLOSE
#: Delete file on close

MODE_UNIQUE_OPEN = MPI_MODE_UNIQUE_OPEN
#: File will not be concurrently opened elsewhere

MODE_SEQUENTIAL = MPI_MODE_SEQUENTIAL
#: File will only be accessed sequentially

MODE_APPEND = MPI_MODE_APPEND
#: Set initial position of all file pointers to end of file


# Positioning
# -----------

SEEK_SET = MPI_SEEK_SET
#: File pointer is set to offset

SEEK_CUR = MPI_SEEK_CUR
#: File pointer is set to the current position plus offset

SEEK_END = MPI_SEEK_END
#: File pointer is set to the end plus offset

DISPLACEMENT_CURRENT = MPI_DISPLACEMENT_CURRENT
#: Special displacement value for files opened in sequential mode

DISP_CUR = MPI_DISPLACEMENT_CURRENT
#: Convenience alias for `DISPLACEMENT_CURRENT`


cdef class File:

    """
    File handle

    Wraps an ``MPI_File`` handle and exposes the MPI parallel I/O
    operations (open/close, views, and the blocking, nonblocking,
    collective, and split-collective data-access routines).
    """

    def __cinit__(self, File file=None):
        # Start as the null handle; optionally alias another File's handle.
        self.ob_mpi = MPI_FILE_NULL
        if file is None: return
        self.ob_mpi = file.ob_mpi

    def __dealloc__(self):
        # Free the underlying MPI handle only if this wrapper owns it.
        if not (self.flags & PyMPI_OWNED): return
        CHKERR( del_File(&self.ob_mpi) )

    def __richcmp__(self, other, int op):
        # Only equality/inequality are meaningful for opaque MPI handles.
        if not isinstance(other, File): return NotImplemented
        cdef File s = <File>self, o = <File>other
        if   op == Py_EQ: return (s.ob_mpi == o.ob_mpi)
        elif op == Py_NE: return (s.ob_mpi != o.ob_mpi)
        cdef str mod = type(self).__module__
        cdef str cls = type(self).__name__
        raise TypeError("unorderable type: '%s.%s'" % (mod, cls))

    def __bool__(self):
        # A File is truthy iff it holds a non-null MPI handle.
        return self.ob_mpi != MPI_FILE_NULL

    # File Manipulation
    # -----------------

    @classmethod
    def Open(cls, Intracomm comm not None, filename,
             int amode=MODE_RDONLY, Info info=INFO_NULL):
        """
        Open a file
        """
        cdef char *cfilename = NULL
        # Keep the returned object alive so cfilename stays valid.
        filename = asmpistr(filename, &cfilename)
        cdef MPI_Info cinfo = arg_Info(info)
        cdef File file = <File>File.__new__(File)
        with nogil: CHKERR( MPI_File_open(
            comm.ob_mpi, cfilename, amode, cinfo, &file.ob_mpi) )
        # Install the module's error-handling policy on the new handle.
        file_set_eh(file.ob_mpi)
        return file

    def Close(self):
        """
        Close a file
        """
        # MPI_File_close also resets ob_mpi to MPI_FILE_NULL.
        with nogil: CHKERR( MPI_File_close(&self.ob_mpi) )

    @classmethod
    def Delete(cls, filename, Info info=INFO_NULL):
        """
        Delete a file
        """
        cdef char *cfilename = NULL
        filename = asmpistr(filename, &cfilename)
        cdef MPI_Info cinfo = arg_Info(info)
        with nogil: CHKERR( MPI_File_delete(cfilename, cinfo) )

    def Set_size(self, Offset size):
        """
        Sets the file size
        """
        with nogil: CHKERR( MPI_File_set_size(self.ob_mpi, size) )

    def Preallocate(self, Offset size):
        """
        Preallocate storage space for a file
        """
        with nogil: CHKERR( MPI_File_preallocate(self.ob_mpi, size) )

    def Get_size(self):
        """
        Return the file size
        """
        cdef MPI_Offset size = 0
        with nogil: CHKERR( MPI_File_get_size(self.ob_mpi, &size) )
        return size

    property size:
        """file size"""
        def __get__(self):
            return self.Get_size()

    def Get_amode(self):
        """
        Return the file access mode
        """
        cdef int amode = 0
        with nogil: CHKERR( MPI_File_get_amode(self.ob_mpi, &amode) )
        return amode

    property amode:
        """file access mode"""
        def __get__(self):
            return self.Get_amode()

    # File Group
    # ----------

    def Get_group(self):
        """
        Return the group of processes
        that opened the file
        """
        cdef Group group = <Group>Group.__new__(Group)
        with nogil: CHKERR( MPI_File_get_group(self.ob_mpi, &group.ob_mpi) )
        return group

    property group:
        """file group"""
        def __get__(self):
            return self.Get_group()

    # File Info
    # ---------

    def Set_info(self, Info info not None):
        """
        Set new values for the hints
        associated with a file
        """
        with nogil: CHKERR( MPI_File_set_info(self.ob_mpi, info.ob_mpi) )

    def Get_info(self):
        """
        Return the hints for a file
        that are currently in use
        """
        cdef Info info = <Info>Info.__new__(Info)
        with nogil: CHKERR( MPI_File_get_info(self.ob_mpi, &info.ob_mpi) )
        return info

    property info:
        """file info"""
        def __get__(self):
            return self.Get_info()
        def __set__(self, info):
            self.Set_info(info)

    # File Views
    # ----------

    def Set_view(self, Offset disp=0,
                 Datatype etype=None, Datatype filetype=None,
                 object datarep=None, Info info=INFO_NULL):
        """
        Set the file view
        """
        # Defaults: "native" representation, MPI_BYTE etype, and a
        # filetype equal to the etype when not given explicitly.
        cdef char *cdatarep = b"native"
        if datarep is not None: datarep = asmpistr(datarep, &cdatarep)
        cdef MPI_Datatype cetype = MPI_BYTE
        if etype is not None: cetype = etype.ob_mpi
        cdef MPI_Datatype cftype = cetype
        if filetype is not None: cftype = filetype.ob_mpi
        cdef MPI_Info cinfo = arg_Info(info)
        with nogil: CHKERR( MPI_File_set_view(
            self.ob_mpi, disp, cetype, cftype, cdatarep, cinfo) )

    def Get_view(self):
        """
        Return the file view
        """
        cdef MPI_Offset disp = 0
        cdef Datatype etype = <Datatype>Datatype.__new__(Datatype)
        cdef Datatype ftype = <Datatype>Datatype.__new__(Datatype)
        cdef char cdatarep[MPI_MAX_DATAREP_STRING+1]
        with nogil: CHKERR( MPI_File_get_view(
            self.ob_mpi, &disp, &etype.ob_mpi, &ftype.ob_mpi, cdatarep) )
        #if builtin_Datatype(etype.ob_mpi): etype.flags = 0
        #if builtin_Datatype(ftype.ob_mpi): ftype.flags = 0
        cdatarep[MPI_MAX_DATAREP_STRING] = 0 # just in case
        cdef object datarep = mpistr(cdatarep)
        return (disp, etype, ftype, datarep)

    # Data Access
    # -----------

    # Data Access with Explicit Offsets
    # ---------------------------------

    def Read_at(self, Offset offset, buf, Status status=None):
        """
        Read using explicit offset
        """
        cdef _p_msg_io m = message_io_read(buf)
        cdef MPI_Status *statusp = arg_Status(status)
        with nogil: CHKERR( MPI_File_read_at(
            self.ob_mpi, offset, m.buf, m.count, m.dtype, statusp) )

    def Read_at_all(self, Offset offset, buf, Status status=None):
        """
        Collective read using explicit offset
        """
        cdef _p_msg_io m = message_io_read(buf)
        cdef MPI_Status *statusp = arg_Status(status)
        with nogil: CHKERR( MPI_File_read_at_all(
            self.ob_mpi, offset, m.buf, m.count, m.dtype, statusp) )

    def Write_at(self, Offset offset, buf, Status status=None):
        """
        Write using explicit offset
        """
        cdef _p_msg_io m = message_io_write(buf)
        cdef MPI_Status *statusp = arg_Status(status)
        with nogil: CHKERR( MPI_File_write_at(
            self.ob_mpi, offset, m.buf, m.count, m.dtype, statusp) )

    def Write_at_all(self, Offset offset, buf, Status status=None):
        """
        Collective write using explicit offset
        """
        cdef _p_msg_io m = message_io_write(buf)
        cdef MPI_Status *statusp = arg_Status(status)
        with nogil: CHKERR( MPI_File_write_at_all(
            self.ob_mpi, offset, m.buf, m.count, m.dtype, statusp) )

    def Iread_at(self, Offset offset, buf):
        """
        Nonblocking read using explicit offset
        """
        cdef _p_msg_io m = message_io_read(buf)
        cdef Request request = <Request>Request.__new__(Request)
        with nogil: CHKERR( MPI_File_iread_at(
            self.ob_mpi, offset, m.buf, m.count, m.dtype, &request.ob_mpi) )
        # Keep the message buffer alive until the request completes.
        request.ob_buf = m
        return request

    def Iread_at_all(self, Offset offset, buf):
        """
        Nonblocking collective read using explicit offset
        """
        cdef _p_msg_io m = message_io_read(buf)
        cdef Request request = <Request>Request.__new__(Request)
        with nogil: CHKERR( MPI_File_iread_at_all(
            self.ob_mpi, offset, m.buf, m.count, m.dtype, &request.ob_mpi) )
        request.ob_buf = m
        return request

    def Iwrite_at(self, Offset offset, buf):
        """
        Nonblocking write using explicit offset
        """
        cdef _p_msg_io m = message_io_write(buf)
        cdef Request request = <Request>Request.__new__(Request)
        with nogil: CHKERR( MPI_File_iwrite_at(
            self.ob_mpi, offset, m.buf, m.count, m.dtype, &request.ob_mpi) )
        request.ob_buf = m
        return request

    def Iwrite_at_all(self, Offset offset, buf):
        """
        Nonblocking collective write using explicit offset
        """
        cdef _p_msg_io m = message_io_write(buf)
        cdef Request request = <Request>Request.__new__(Request)
        with nogil: CHKERR( MPI_File_iwrite_at_all(
            self.ob_mpi, offset, m.buf, m.count, m.dtype, &request.ob_mpi) )
        request.ob_buf = m
        return request

    # Data Access with Individual File Pointers
    # -----------------------------------------

    def Read(self, buf, Status status=None):
        """
        Read using individual file pointer
        """
        cdef _p_msg_io m = message_io_read(buf)
        cdef MPI_Status *statusp = arg_Status(status)
        with nogil: CHKERR( MPI_File_read(
            self.ob_mpi, m.buf, m.count, m.dtype, statusp) )

    def Read_all(self, buf, Status status=None):
        """
        Collective read using individual file pointer
        """
        cdef _p_msg_io m = message_io_read(buf)
        cdef MPI_Status *statusp = arg_Status(status)
        with nogil: CHKERR( MPI_File_read_all(
            self.ob_mpi, m.buf, m.count, m.dtype, statusp) )

    def Write(self, buf, Status status=None):
        """
        Write using individual file pointer
        """
        cdef _p_msg_io m = message_io_write(buf)
        cdef MPI_Status *statusp = arg_Status(status)
        with nogil: CHKERR( MPI_File_write(
            self.ob_mpi, m.buf, m.count, m.dtype, statusp) )

    def Write_all(self, buf, Status status=None):
        """
        Collective write using individual file pointer
        """
        cdef _p_msg_io m = message_io_write(buf)
        cdef MPI_Status *statusp = arg_Status(status)
        with nogil: CHKERR( MPI_File_write_all(
            self.ob_mpi, m.buf, m.count, m.dtype, statusp) )

    def Iread(self, buf):
        """
        Nonblocking read using individual file pointer
        """
        cdef _p_msg_io m = message_io_read(buf)
        cdef Request request = <Request>Request.__new__(Request)
        with nogil: CHKERR( MPI_File_iread(
            self.ob_mpi, m.buf, m.count, m.dtype, &request.ob_mpi) )
        request.ob_buf = m
        return request

    def Iread_all(self, buf):
        """
        Nonblocking collective read using individual file pointer
        """
        cdef _p_msg_io m = message_io_read(buf)
        cdef Request request = <Request>Request.__new__(Request)
        with nogil: CHKERR( MPI_File_iread_all(
            self.ob_mpi, m.buf, m.count, m.dtype, &request.ob_mpi) )
        request.ob_buf = m
        return request

    def Iwrite(self, buf):
        """
        Nonblocking write using individual file pointer
        """
        cdef _p_msg_io m = message_io_write(buf)
        cdef Request request = <Request>Request.__new__(Request)
        with nogil: CHKERR( MPI_File_iwrite(
            self.ob_mpi, m.buf, m.count, m.dtype, &request.ob_mpi) )
        request.ob_buf = m
        return request

    def Iwrite_all(self, buf):
        """
        Nonblocking collective write using individual file pointer
        """
        cdef _p_msg_io m = message_io_write(buf)
        cdef Request request = <Request>Request.__new__(Request)
        with nogil: CHKERR( MPI_File_iwrite_all(
            self.ob_mpi, m.buf, m.count, m.dtype, &request.ob_mpi) )
        request.ob_buf = m
        return request

    def Seek(self, Offset offset, int whence=SEEK_SET):
        """
        Update the individual file pointer
        """
        with nogil: CHKERR( MPI_File_seek(self.ob_mpi, offset, whence) )

    def Get_position(self):
        """
        Return the current position of the individual file pointer
        in etype units relative to the current view
        """
        cdef MPI_Offset offset = 0
        with nogil: CHKERR( MPI_File_get_position(self.ob_mpi, &offset) )
        return offset

    def Get_byte_offset(self, Offset offset):
        """
        Returns the absolute byte position in the file corresponding
        to 'offset' etypes relative to the current view
        """
        cdef MPI_Offset disp = 0
        with nogil: CHKERR( MPI_File_get_byte_offset(
            self.ob_mpi, offset, &disp) )
        return disp

    # Data Access with Shared File Pointers
    # -------------------------------------

    def Read_shared(self, buf, Status status=None):
        """
        Read using shared file pointer
        """
        cdef _p_msg_io m = message_io_read(buf)
        cdef MPI_Status *statusp = arg_Status(status)
        with nogil: CHKERR( MPI_File_read_shared(
            self.ob_mpi, m.buf, m.count, m.dtype, statusp) )

    def Write_shared(self, buf, Status status=None):
        """
        Write using shared file pointer
        """
        cdef _p_msg_io m = message_io_write(buf)
        cdef MPI_Status *statusp = arg_Status(status)
        with nogil: CHKERR( MPI_File_write_shared(
            self.ob_mpi, m.buf, m.count, m.dtype, statusp) )

    def Iread_shared(self, buf):
        """
        Nonblocking read using shared file pointer
        """
        cdef _p_msg_io m = message_io_read(buf)
        cdef Request request = <Request>Request.__new__(Request)
        with nogil: CHKERR( MPI_File_iread_shared(
            self.ob_mpi, m.buf, m.count, m.dtype, &request.ob_mpi) )
        request.ob_buf = m
        return request

    def Iwrite_shared(self, buf):
        """
        Nonblocking write using shared file pointer
        """
        cdef _p_msg_io m = message_io_write(buf)
        cdef Request request = <Request>Request.__new__(Request)
        with nogil: CHKERR( MPI_File_iwrite_shared(
            self.ob_mpi, m.buf, m.count, m.dtype, &request.ob_mpi) )
        request.ob_buf = m
        return request

    def Read_ordered(self, buf, Status status=None):
        """
        Collective read using shared file pointer
        """
        cdef _p_msg_io m = message_io_read(buf)
        cdef MPI_Status *statusp = arg_Status(status)
        with nogil: CHKERR( MPI_File_read_ordered(
            self.ob_mpi, m.buf, m.count, m.dtype, statusp) )

    def Write_ordered(self, buf, Status status=None):
        """
        Collective write using shared file pointer
        """
        cdef _p_msg_io m = message_io_write(buf)
        cdef MPI_Status *statusp = arg_Status(status)
        with nogil: CHKERR( MPI_File_write_ordered(
            self.ob_mpi, m.buf, m.count, m.dtype, statusp) )

    def Seek_shared(self, Offset offset, int whence=SEEK_SET):
        """
        Update the shared file pointer
        """
        with nogil: CHKERR( MPI_File_seek_shared(
            self.ob_mpi, offset, whence) )

    def Get_position_shared(self):
        """
        Return the current position of the shared file pointer
        in etype units relative to the current view
        """
        cdef MPI_Offset offset = 0
        with nogil: CHKERR( MPI_File_get_position_shared(
            self.ob_mpi, &offset) )
        return offset

    # Split Collective Data Access Routines
    # -------------------------------------

    # explicit offset

    def Read_at_all_begin(self, Offset offset, buf):
        """
        Start a split collective read using explicit offset
        """
        cdef _p_msg_io m = message_io_read(buf)
        with nogil: CHKERR( MPI_File_read_at_all_begin(
            self.ob_mpi, offset, m.buf, m.count, m.dtype) )

    def Read_at_all_end(self, buf, Status status=None):
        """
        Complete a split collective read using explicit offset
        """
        cdef _p_msg_io m = message_io_read(buf)
        cdef MPI_Status *statusp = arg_Status(status)
        with nogil: CHKERR( MPI_File_read_at_all_end(
            self.ob_mpi, m.buf, statusp) )

    def Write_at_all_begin(self, Offset offset, buf):
        """
        Start a split collective write using explicit offset
        """
        cdef _p_msg_io m = message_io_write(buf)
        with nogil: CHKERR( MPI_File_write_at_all_begin(
            self.ob_mpi, offset, m.buf, m.count, m.dtype) )

    def Write_at_all_end(self, buf, Status status=None):
        """
        Complete a split collective write using explicit offset
        """
        cdef _p_msg_io m = message_io_write(buf)
        cdef MPI_Status *statusp = arg_Status(status)
        with nogil: CHKERR( MPI_File_write_at_all_end(
            self.ob_mpi, m.buf, statusp) )

    # individual file pointer

    def Read_all_begin(self, buf):
        """
        Start a split collective read
        using individual file pointer
        """
        cdef _p_msg_io m = message_io_read(buf)
        with nogil: CHKERR( MPI_File_read_all_begin(
            self.ob_mpi, m.buf, m.count, m.dtype) )

    def Read_all_end(self, buf, Status status=None):
        """
        Complete a split collective read
        using individual file pointer
        """
        cdef _p_msg_io m = message_io_read(buf)
        cdef MPI_Status *statusp = arg_Status(status)
        with nogil: CHKERR( MPI_File_read_all_end(
            self.ob_mpi, m.buf, statusp) )

    def Write_all_begin(self, buf):
        """
        Start a split collective write
        using individual file pointer
        """
        cdef _p_msg_io m = message_io_write(buf)
        with nogil: CHKERR( MPI_File_write_all_begin(
            self.ob_mpi, m.buf, m.count, m.dtype) )

    def Write_all_end(self, buf, Status status=None):
        """
        Complete a split collective write
        using individual file pointer
        """
        cdef _p_msg_io m = message_io_write(buf)
        cdef MPI_Status *statusp = arg_Status(status)
        with nogil: CHKERR( MPI_File_write_all_end(
            self.ob_mpi, m.buf, statusp) )

    # shared file pointer

    def Read_ordered_begin(self, buf):
        """
        Start a split collective read
        using shared file pointer
        """
        cdef _p_msg_io m = message_io_read(buf)
        with nogil: CHKERR( MPI_File_read_ordered_begin(
            self.ob_mpi, m.buf, m.count, m.dtype) )

    def Read_ordered_end(self, buf, Status status=None):
        """
        Complete a split collective read
        using shared file pointer
        """
        cdef _p_msg_io m = message_io_read(buf)
        cdef MPI_Status *statusp = arg_Status(status)
        with nogil: CHKERR( MPI_File_read_ordered_end(
            self.ob_mpi, m.buf, statusp) )

    def Write_ordered_begin(self, buf):
        """
        Start a split collective write using
        shared file pointer
        """
        cdef _p_msg_io m = message_io_write(buf)
        with nogil: CHKERR( MPI_File_write_ordered_begin(
            self.ob_mpi, m.buf, m.count, m.dtype) )

    def Write_ordered_end(self, buf, Status status=None):
        """
        Complete a split collective write
        using shared file pointer
        """
        cdef _p_msg_io m = message_io_write(buf)
        cdef MPI_Status *statusp = arg_Status(status)
        with nogil: CHKERR( MPI_File_write_ordered_end(
            self.ob_mpi, m.buf, statusp) )

    # File Interoperability
    # ---------------------

    def Get_type_extent(self, Datatype datatype not None):
        """
        Return the extent of datatype in the file
        """
        cdef MPI_Aint extent = 0
        with nogil: CHKERR( MPI_File_get_type_extent(
            self.ob_mpi, datatype.ob_mpi, &extent) )
        return extent

    # Consistency and Semantics
    # -------------------------

    def Set_atomicity(self, bint flag):
        """
        Set the atomicity mode
        """
        with nogil: CHKERR( MPI_File_set_atomicity(self.ob_mpi, flag) )

    def Get_atomicity(self):
        """
        Return the atomicity mode
        """
        cdef int flag = 0
        with nogil: CHKERR( MPI_File_get_atomicity(self.ob_mpi, &flag) )
        return <bint>flag

    property atomicity:
        """atomicity"""
        def __get__(self):
            return self.Get_atomicity()
        def __set__(self, value):
            self.Set_atomicity(value)

    def Sync(self):
        """
        Causes all previous writes to be
        transferred to the storage device
        """
        with nogil: CHKERR( MPI_File_sync(self.ob_mpi) )

    # Error Handling
    # --------------

    def Get_errhandler(self):
        """
        Get the error handler for a file
        """
        cdef Errhandler errhandler = <Errhandler>Errhandler.__new__(Errhandler)
        CHKERR( MPI_File_get_errhandler(self.ob_mpi, &errhandler.ob_mpi) )
        return errhandler

    def Set_errhandler(self, Errhandler errhandler not None):
        """
        Set the error handler for a file
        """
        CHKERR( MPI_File_set_errhandler(self.ob_mpi, errhandler.ob_mpi) )

    def Call_errhandler(self, int errorcode):
        """
        Call the error handler installed on a file
        """
        CHKERR( MPI_File_call_errhandler(self.ob_mpi, errorcode) )

    # Fortran Handle
    # --------------

    def py2f(self):
        """
        Return the Fortran integer handle for this file
        """
        return MPI_File_c2f(self.ob_mpi)

    @classmethod
    def f2py(cls, arg):
        """
        Return a File object from a Fortran integer handle
        """
        cdef File file = <File>File.__new__(File)
        file.ob_mpi = MPI_File_f2c(arg)
        return file



cdef File __FILE_NULL__ = new_File(MPI_FILE_NULL)


# Predefined file handles
# -----------------------

FILE_NULL = __FILE_NULL__ #: Null file handle


# User-defined data representations
# ---------------------------------

def Register_datarep(datarep, read_fn, write_fn, extent_fn):
    """
    Register user-defined data representations
    """
    cdef char *cdatarep = NULL
    datarep = asmpistr(datarep, &cdatarep)
    cdef object state = _p_datarep(read_fn, write_fn, extent_fn)
    # Use null conversion functions unless a Python callback was supplied.
    cdef MPI_Datarep_conversion_function *rd = MPI_CONVERSION_FN_NULL
    cdef MPI_Datarep_conversion_function *wr = MPI_CONVERSION_FN_NULL
    cdef MPI_Datarep_extent_function *ex = datarep_extent_fn
    cdef void* xs = <void*>state
    if read_fn  is not None: rd = datarep_read_fn
    if write_fn is not None: wr = datarep_write_fn
    CHKERR( MPI_Register_datarep(cdatarep, rd, wr, ex, xs) )
    # Keep the callback state alive for the lifetime of the registration.
    datarep_registry[datarep] = state