/*============================================================================
 * Convert between general domain partition and block distribution.
 *============================================================================*/

/*
  This file is part of Code_Saturne, a general-purpose CFD tool.

  Copyright (C) 1998-2021 EDF S.A.

  This program is free software; you can redistribute it and/or modify it under
  the terms of the GNU General Public License as published by the Free Software
  Foundation; either version 2 of the License, or (at your option) any later
  version.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
  details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
  Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

/*----------------------------------------------------------------------------*/

#include "cs_defs.h"

/*----------------------------------------------------------------------------
 * Standard C library headers
 *----------------------------------------------------------------------------*/

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*----------------------------------------------------------------------------
 * Local headers
 *----------------------------------------------------------------------------*/

#include "bft_mem.h"
#include "bft_error.h"

#include "cs_all_to_all.h"
#include "cs_block_dist.h"

/*----------------------------------------------------------------------------
 * Header for the current file
 *----------------------------------------------------------------------------*/

#include "cs_part_to_block.h"

/*----------------------------------------------------------------------------*/

BEGIN_C_DECLS

/*! \cond DOXYGEN_SHOULD_SKIP_THIS */

/*=============================================================================
 * Macro definitions
 *============================================================================*/

/*=============================================================================
 * Local type definitions
 *============================================================================*/

/* Structure used to redistribute data */

#if defined(HAVE_MPI)

struct _cs_part_to_block_t {

  MPI_Comm     comm;            /* Associated MPI communicator */

  int          rank;            /* Local rank in communicator */
  int          n_ranks;         /* Number of ranks associated with
                                   communicator */

  cs_block_dist_info_t  bi;     /* Associated block information */

  cs_all_to_all_t      *d;      /* Associated all-to-all distributor */

  size_t       n_block_ents;    /* Number of entities to receive (this block) */
  size_t       n_part_ents;     /* Number of entities to send (partition) */
  size_t       recv_size;       /* Size of receive buffer for MPI_Gatherv
                                   (send_size not necessary, as send_size
                                   should always be equal to n_part_ents,
                                   though elements may be assembled in a
                                   different order) */

  int         *recv_count;      /* Receive counts for MPI_Gatherv */
  int         *recv_displ;      /* Receive displs for MPI_Gatherv */

  int         *block_rank_id;   /* Block id for each part entity
                                   (NULL if based on global_ent_num) */
  cs_lnum_t   *send_block_id;   /* Id in block of sent entities */
  cs_lnum_t   *recv_block_id;   /* Id in block of received entities */

  const cs_gnum_t   *global_ent_num;  /* Shared global entity numbers */
  cs_gnum_t         *_global_ent_num; /* Private global entity numbers */
};

#endif /* defined(HAVE_MPI) */
/*============================================================================
 * Local function definitions
 *============================================================================*/

#if defined(HAVE_MPI)

/*----------------------------------------------------------------------------
 * Compute rank displacement based on count.
 *
 * arguments:
 *   n_ranks <-- number of ranks
 *   count   <-- number of entities per rank (size: n_ranks)
 *   displ   --> entity displacement in cumulative array (size: n_ranks)
 *
 * returns:
 *   cumulative count for all ranks
 *----------------------------------------------------------------------------*/

static cs_lnum_t
_compute_displ(int        n_ranks,
               const int  count[],
               int        displ[])
{
  int i;
  cs_lnum_t total_count = 0;

  displ[0] = 0;

  for (i = 1; i < n_ranks; i++)
    displ[i] = displ[i-1] + count[i-1];

  total_count = displ[n_ranks-1] + count[n_ranks-1];

  return total_count;
}
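
/* Worked example (illustrative only): with n_ranks = 3 and
   count = {3, 1, 2}, _compute_displ() sets displ = {0, 3, 4} and
   returns 6, the total number of entities gathered over all ranks. */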

/*----------------------------------------------------------------------------
 * Create an empty distribution helper structure.
 *
 * Receive counts and displacements are not allocated at this point;
 * all pointer members are simply initialized to NULL.
 *
 * arguments:
 *   comm <-- communicator
 *
 * returns:
 *   pointer to an empty distribution helper structure
 *----------------------------------------------------------------------------*/

static cs_part_to_block_t *
_part_to_block_create(MPI_Comm comm)
{
  cs_part_to_block_t *d;

  BFT_MALLOC(d, 1, cs_part_to_block_t);

  d->comm = comm;

  MPI_Comm_rank(comm, &(d->rank));
  MPI_Comm_size(comm, &(d->n_ranks));

  memset(&(d->bi), 0, sizeof(cs_block_dist_info_t));

  d->d = NULL;

  d->n_block_ents = 0;
  d->n_part_ents = 0;
  d->recv_size = 0;

  d->recv_count = NULL;
  d->recv_displ = NULL;

  d->block_rank_id = NULL;
  d->send_block_id = NULL;
  d->recv_block_id = NULL;
  d->global_ent_num = NULL;
  d->_global_ent_num = NULL;

  return d;
}

/*----------------------------------------------------------------------------
 * Initialize partition to block distributor based on global element numbers,
 * using gather to rank 0 when only one block is active.
 *
 * arguments:
 *   d    <-> partition to block distributor to initialize
 *   comm <-- associated communicator
 *----------------------------------------------------------------------------*/

static void
_init_gather_by_gnum(cs_part_to_block_t  *d,
                     MPI_Comm             comm)
{
  size_t j;

  int send_count = d->n_part_ents;
  cs_lnum_t *send_block_id = NULL;

  const int n_ranks = d->n_ranks;

  const cs_gnum_t *global_ent_num = d->global_ent_num;

  /* Initialize send and receive counts */

  if (d->rank == 0) {
    BFT_MALLOC(d->recv_count, n_ranks, int);
    BFT_MALLOC(d->recv_displ, n_ranks, int);
  }

  /* Count values to send and receive */

  MPI_Gather(&send_count, 1, MPI_INT, d->recv_count, 1, MPI_INT, 0, comm);

  if (d->rank == 0)
    d->recv_size = _compute_displ(n_ranks, d->recv_count, d->recv_displ);

  /* Prepare list of local block ids of sent elements */

  if (d->rank == 0)
    BFT_MALLOC(d->recv_block_id, d->recv_size, cs_lnum_t);

  BFT_MALLOC(send_block_id, d->n_part_ents, cs_lnum_t);

  for (j = 0; j < d->n_part_ents; j++)
    send_block_id[j] = global_ent_num[j] - 1;

  /* Exchange values */

  MPI_Gatherv(send_block_id, send_count, CS_MPI_LNUM,
              d->recv_block_id, d->recv_count, d->recv_displ, CS_MPI_LNUM,
              0, d->comm);

  BFT_FREE(send_block_id);
}
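
/* Note (illustrative only): with a single active block, the block's global
   number range starts at 1, so an entity with global number g maps to
   block id g - 1; for example, global numbers {5, 2, 9} on a given rank
   yield send_block_id = {4, 1, 8}. */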

/*----------------------------------------------------------------------------
 * Copy array data from general domain partition to block distribution,
 * using gather to rank 0 when only one block is active.
 *
 * arguments:
 *   d            <-- partition to block distributor
 *   datatype     <-- type of data considered
 *   stride       <-- number of values per entity (interlaced)
 *   part_values  <-- values in general domain partition
 *   block_values --> values in block distribution
 *----------------------------------------------------------------------------*/

static void
_copy_array_gatherv(cs_part_to_block_t  *d,
                    cs_datatype_t        datatype,
                    int                  stride,
                    const void          *part_values,
                    void                *block_values)
{
  int i;
  size_t j, k;

  unsigned char *send_buf = NULL;
  unsigned char *recv_buf = NULL;

  int send_count = d->n_part_ents * stride;

  size_t stride_size = cs_datatype_size[datatype]*stride;
  MPI_Datatype mpi_type = cs_datatype_to_mpi[datatype];

  unsigned char *_block_values = block_values;

  const int n_ranks = d->n_ranks;
  const size_t n_recv_ents = d->recv_size;

  /* Adjust send and receive dimensions */

  if (stride > 1 && d->rank == 0) {
    for (i = 0; i < n_ranks; i++) {
      d->recv_count[i] *= stride;
      d->recv_displ[i] *= stride;
    }
  }

  BFT_MALLOC(recv_buf, n_recv_ents*stride_size, unsigned char);

  BFT_MALLOC(send_buf, d->n_part_ents*stride_size, unsigned char);
  if (d->n_part_ents > 0)
    memcpy(send_buf, part_values, d->n_part_ents*stride_size);

  /* Exchange values */

  MPI_Gatherv(send_buf, send_count, mpi_type,
              recv_buf, d->recv_count, d->recv_displ, mpi_type,
              0, d->comm);

  /* Distribute received values */

  for (j = 0; j < n_recv_ents; j++) {

    size_t r_displ = j*stride_size;
    size_t w_displ = d->recv_block_id[j]*stride_size;

    for (k = 0; k < stride_size; k++)
      _block_values[w_displ + k] = recv_buf[r_displ + k];
  }

  /* Cleanup */

  BFT_FREE(recv_buf);
  BFT_FREE(send_buf);

  /* Reset send and receive dimensions */

  if (stride > 1 && d->rank == 0) {
    for (i = 0; i < n_ranks; i++) {
      d->recv_count[i] /= stride;
      d->recv_displ[i] /= stride;
    }
  }
}
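
/* Note (illustrative only): with stride = 3 and datatype = CS_DOUBLE
   (e.g. interlaced vertex coordinates), stride_size is 24 bytes, and the
   distribution loop above copies each received 24-byte tuple to offset
   recv_block_id[j] * 24 in the block array, so values land at their block
   position regardless of the order in which ranks' contributions were
   concatenated by MPI_Gatherv. */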

/*----------------------------------------------------------------------------
 * Copy local index from general domain partition to block distribution,
 * using gather to rank 0 when only one block is active.
 *
 * arguments:
 *   d           <-- partition to block distributor
 *   part_index  <-- local index in general partition distribution
 *                   (size: n_part_entities + 1)
 *   block_index --> local index in block distribution
 *                   (size: n_block_entities + 1)
 *----------------------------------------------------------------------------*/

static void
_copy_index_gatherv(cs_part_to_block_t  *d,
                    const cs_lnum_t     *part_index,
                    cs_lnum_t           *block_index)
{
  size_t j;

  cs_lnum_t *send_buf = NULL;
  cs_lnum_t *recv_buf = NULL;

  int send_count = d->n_part_ents;

  const size_t n_recv_ents = d->recv_size;

  /* Prepare MPI buffers */

  BFT_MALLOC(send_buf, d->n_part_ents, cs_lnum_t);

  /* Prepare list of element values to send */

  for (j = 0; j < d->n_part_ents; j++)
    send_buf[j] = part_index[j+1] - part_index[j];

  BFT_MALLOC(recv_buf, n_recv_ents, cs_lnum_t);

  /* Exchange values */

  MPI_Gatherv(send_buf, send_count, CS_MPI_LNUM,
              recv_buf, d->recv_count, d->recv_displ, CS_MPI_LNUM,
              0, d->comm);

  /* Distribute received values */

  if (block_index != NULL) {

    for (j = 0; j < d->n_block_ents+1; j++)
      block_index[j] = 0;

    for (j = 0; j < n_recv_ents; j++) {
      assert(   block_index[d->recv_block_id[j]+1] == 0
             || block_index[d->recv_block_id[j]+1] == recv_buf[j]);
      block_index[d->recv_block_id[j]+1] = recv_buf[j];
    }

    /* Transform count to index */

    for (j = 0; j < d->n_block_ents; j++)
      block_index[j+1] += block_index[j];
  }

  /* Cleanup */

  BFT_FREE(recv_buf);
  BFT_FREE(send_buf);
}
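
/* Worked example (illustrative only): a rank with part_index = {0, 2, 5}
   sends the per-entity counts {2, 3}; on rank 0, each count is stored at
   the sent entity's block position, and the final count-to-index pass
   turns, say, counts {2, 3, 1} into block_index = {0, 2, 5, 6}. */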

/*----------------------------------------------------------------------------
 * Copy indexed data from general domain partition to block distribution,
 * using gather to rank 0 when only one block is active.
 *
 * This is useful for distribution of entity connectivity information.
 *
 * arguments:
 *   d           <-- partition to block distributor
 *   datatype    <-- type of data considered
 *   part_index  <-- local index in general distribution
 *   part_val    <-- numbers in general distribution
 *                   (size: part_index[n_part_ents])
 *   block_index <-- local index in block distribution
 *                   (as computed by _copy_index_gatherv)
 *   block_val   --> values in block distribution
 *                   (size: block_index[n_block_ents])
 *----------------------------------------------------------------------------*/

static void
_copy_indexed_gatherv(cs_part_to_block_t  *d,
                      cs_datatype_t        datatype,
                      const cs_lnum_t     *part_index,
                      const void          *part_val,
                      const cs_lnum_t     *block_index,
                      void                *block_val)
{
  int i, l;
  size_t j, k;

  size_t w_displ = 0, r_displ = 0;
  size_t recv_size = 0;
  int send_count = 0;

  int *recv_count = NULL;
  int *recv_displ = NULL;

  unsigned char *send_buf = NULL;
  unsigned char *recv_buf = NULL;

  const unsigned char *_part_val = part_val;
  unsigned char *_block_val = block_val;

  size_t type_size = cs_datatype_size[datatype];
  MPI_Datatype mpi_type = cs_datatype_to_mpi[datatype];

  const int n_ranks = d->n_ranks;
  const size_t n_recv_ents = d->recv_size;

  /* Build send and receive counts */
  /*-------------------------------*/

  if (d->rank == 0) {
    BFT_MALLOC(recv_count, n_ranks, int);
    BFT_MALLOC(recv_displ, n_ranks, int);
    for (i = 0; i < n_ranks; i++)
      recv_count[i] = 0;
  }

  /* Prepare count of element values to send */

  for (j = 0; j < d->n_part_ents; j++)
    send_count += part_index[j+1] - part_index[j];

  /* Prepare count of element values to receive */

  if (d->rank == 0) {
    k = 0;
    for (i = 0; i < n_ranks; i++) {
      for (l = 0; l < d->recv_count[i]; l++) {
        w_displ = d->recv_block_id[k++];
        recv_count[i] += block_index[w_displ + 1] - block_index[w_displ];
      }
    }
    recv_size = _compute_displ(n_ranks, recv_count, recv_displ);
  }

  /* Build send and receive buffers */
  /*--------------------------------*/

  if (d->rank == 0)
    BFT_MALLOC(recv_buf, recv_size*type_size, unsigned char);

  BFT_MALLOC(send_buf, send_count*type_size, unsigned char);

  w_displ = 0;
  for (j = 0; j < d->n_part_ents; j++) {
    size_t ent_size = (part_index[j+1] - part_index[j]);
    r_displ = part_index[j]*type_size;
    for (k = 0; k < ent_size*type_size; k++)
      send_buf[w_displ + k] = _part_val[r_displ + k];
    w_displ += ent_size*type_size;
  }

  assert(w_displ == send_count*type_size);

  /* Exchange values */

  MPI_Gatherv(send_buf, send_count, mpi_type,
              recv_buf, recv_count, recv_displ, mpi_type,
              0, d->comm);

  BFT_FREE(send_buf);

  /* Distribute received values */

  if (block_index != NULL) {

    r_displ = 0;

    for (j = 0; j < n_recv_ents; j++) {

      size_t block_id = d->recv_block_id[j];
      size_t ent_size = (block_index[block_id+1] - block_index[block_id])
                        * type_size;
      w_displ = block_index[block_id] * type_size;

      for (k = 0; k < ent_size; k++)
        _block_val[w_displ++] = recv_buf[r_displ++];
    }

    assert(r_displ == recv_size*type_size);
  }

  /* Cleanup */

  if (d->rank == 0) {
    BFT_FREE(recv_buf);
    BFT_FREE(recv_count);
    BFT_FREE(recv_displ);
  }
}

#endif /* defined(HAVE_MPI) */

/*! (DOXYGEN_SHOULD_SKIP_THIS) \endcond */

/*============================================================================
 * Public function definitions
 *============================================================================*/

#if defined(HAVE_MPI)

/*----------------------------------------------------------------------------
 * Initialize partition to block distributor based on global entity numbers.
 *
 * arguments:
 *   comm           <-- communicator
 *   bi             <-- block size and range info
 *   n_ents         <-- number of elements in partition
 *   global_ent_num <-- global entity numbers
 *
 * returns:
 *   initialized partition to block distributor
 *----------------------------------------------------------------------------*/

cs_part_to_block_t *
cs_part_to_block_create_by_gnum(MPI_Comm               comm,
                                cs_block_dist_info_t   bi,
                                cs_lnum_t              n_ents,
                                const cs_gnum_t        global_ent_num[])
{
  cs_part_to_block_t *d = _part_to_block_create(comm);

  d->bi = bi;

  d->n_block_ents = bi.gnum_range[1] - bi.gnum_range[0];
  d->n_part_ents = n_ents;

  d->global_ent_num = global_ent_num;

  if (bi.n_ranks == 1)
    _init_gather_by_gnum(d, comm);
  else {
    int flags = CS_ALL_TO_ALL_USE_DEST_ID | CS_ALL_TO_ALL_NO_REVERSE;
    d->d = cs_all_to_all_create_from_block(n_ents,
                                           flags,
                                           global_ent_num,
                                           bi,
                                           comm);
  }

  /* Return initialized structure */

  return d;
}
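
/* Example usage: a minimal sketch, assuming block size info is obtained
   through cs_block_dist_compute_sizes() from cs_block_dist.h, with
   n_g_ents the global entity count and g_num[] (size n_ents) the global
   numbers of local entities; the two 0 arguments are the minimum rank
   step and minimum block size.

     cs_block_dist_info_t bi
       = cs_block_dist_compute_sizes(rank_id, n_ranks, 0, 0, n_g_ents);

     cs_part_to_block_t *d
       = cs_part_to_block_create_by_gnum(comm, bi, n_ents, g_num);

     ... exchange data, then ...

     cs_part_to_block_destroy(&d);
*/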

/*----------------------------------------------------------------------------
 * Destroy a partition to block distributor structure.
 *
 * arguments:
 *   d <-> pointer to partition to block distributor structure pointer
 *----------------------------------------------------------------------------*/

void
cs_part_to_block_destroy(cs_part_to_block_t  **d)
{
  cs_part_to_block_t *_d = *d;

  if (_d->d != NULL)
    cs_all_to_all_destroy(&(_d->d));

  BFT_FREE(_d->recv_count);
  BFT_FREE(_d->recv_displ);

  BFT_FREE(_d->block_rank_id);
  BFT_FREE(_d->send_block_id);
  BFT_FREE(_d->recv_block_id);

  if (_d->_global_ent_num != NULL)
    BFT_FREE(_d->_global_ent_num);

  BFT_FREE(*d);
}

/*----------------------------------------------------------------------------
 * Transfer ownership of global entity numbers to a block distributor.
 *
 * The global_ent_num[] array should be the same as the one used
 * for the creation of the block distributor.
 *
 * arguments:
 *   d              <-- distributor helper
 *   global_ent_num <-> global entity numbers
 *----------------------------------------------------------------------------*/

void
cs_part_to_block_transfer_gnum(cs_part_to_block_t  *d,
                               cs_gnum_t            global_ent_num[])
{
  assert(d->global_ent_num == global_ent_num);

  d->_global_ent_num = global_ent_num;
}
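
/* Usage sketch (illustrative only): once ownership is transferred, the
   caller should no longer free the array; cs_part_to_block_destroy() will.

     cs_part_to_block_t *d
       = cs_part_to_block_create_by_gnum(comm, bi, n_ents, g_num);
     cs_part_to_block_transfer_gnum(d, g_num);
     g_num = NULL;   ... now owned and later freed by the distributor ...
*/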

/*----------------------------------------------------------------------------
 * Return number of entities associated with local partition.
 *
 * arguments:
 *   d <-- distributor helper
 *
 * returns:
 *   number of entities associated with local partition
 *----------------------------------------------------------------------------*/

cs_lnum_t
cs_part_to_block_get_n_part_ents(cs_part_to_block_t  *d)
{
  cs_lnum_t retval = 0;

  if (d != NULL)
    retval = d->n_part_ents;

  return retval;
}

/*----------------------------------------------------------------------------
 * Copy array data from general domain partition to block distribution.
 *
 * arguments:
 *   d            <-- partition to block distributor
 *   datatype     <-- type of data considered
 *   stride       <-- number of values per entity (interlaced)
 *   part_values  <-- values in general domain partition
 *   block_values --> values in block distribution
 *----------------------------------------------------------------------------*/

void
cs_part_to_block_copy_array(cs_part_to_block_t  *d,
                            cs_datatype_t        datatype,
                            int                  stride,
                            const void          *part_values,
                            void                *block_values)
{
  if (d->bi.n_ranks == 1)
    _copy_array_gatherv(d,
                        datatype,
                        stride,
                        part_values,
                        block_values);
  else
    cs_all_to_all_copy_array(d->d,
                             datatype,
                             stride,
                             false, /* reverse */
                             part_values,
                             block_values);
}
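
/* Example usage: an illustrative sketch, where coords is a hypothetical
   interlaced cs_real_t array of size n_ents*3 (x, y, z per entity) in
   partition order, and block_coords a cs_real_t array of size
   (bi.gnum_range[1] - bi.gnum_range[0])*3 receiving the block values.

     cs_part_to_block_copy_array(d, CS_REAL_TYPE, 3, coords, block_coords);
*/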

/*----------------------------------------------------------------------------
 * Copy local index from general domain partition to block distribution.
 *
 * This is useful for distribution of entity connectivity information.
 *
 * arguments:
 *   d           <-- partition to block distributor
 *   part_index  <-- local index in general partition distribution
 *                   (size: n_part_entities + 1)
 *   block_index --> local index in block distribution
 *----------------------------------------------------------------------------*/

void
cs_part_to_block_copy_index(cs_part_to_block_t  *d,
                            const cs_lnum_t     *part_index,
                            cs_lnum_t           *block_index)
{
  if (d->bi.n_ranks == 1)
    _copy_index_gatherv(d, part_index, block_index);
  else
    cs_all_to_all_copy_index(d->d,
                             false, /* reverse */
                             part_index,
                             block_index);
}

/*----------------------------------------------------------------------------
 * Copy indexed data from general domain partition to block distribution,
 * using gather to rank 0 when only one block is active.
 *
 * This is useful for distribution of entity connectivity information.
 *
 * arguments:
 *   d           <-- partition to block distributor
 *   datatype    <-- type of data considered
 *   part_index  <-- local index in general distribution
 *   part_val    <-- numbers in general distribution
 *                   (size: part_index[n_part_ents])
 *   block_index --> local index in block distribution
 *   block_val   --> values in block distribution
 *                   (size: block_index[n_block_ents])
 *----------------------------------------------------------------------------*/

void
cs_part_to_block_copy_indexed(cs_part_to_block_t  *d,
                              cs_datatype_t        datatype,
                              const cs_lnum_t     *part_index,
                              const void          *part_val,
                              const cs_lnum_t     *block_index,
                              void                *block_val)
{
  if (d->bi.n_ranks == 1)
    _copy_indexed_gatherv(d,
                          datatype,
                          part_index,
                          part_val,
                          block_index,
                          block_val);
  else
    cs_all_to_all_copy_indexed(d->d,
                               datatype,
                               false, /* reverse */
                               part_index,
                               part_val,
                               block_index,
                               block_val);
}
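
/* Example usage: an illustrative sketch for distributing a hypothetical
   face -> vertex connectivity, where f2v_idx (size n_faces + 1) and
   f2v_gnum (size f2v_idx[n_faces]) describe the partition-side data,
   and b_f2v_idx / b_f2v_gnum are the block-side equivalents; the index
   must be copied first, as the indexed copy reads the block index.

     cs_part_to_block_copy_index(d, f2v_idx, b_f2v_idx);

     cs_part_to_block_copy_indexed(d, CS_GNUM_TYPE,
                                   f2v_idx, f2v_gnum,
                                   b_f2v_idx, b_f2v_gnum);
*/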

#endif /* defined(HAVE_MPI) */

/*----------------------------------------------------------------------------*/

END_C_DECLS
