/*
 * -- High Performance Computing Linpack Benchmark (HPL)
 *    HPL - 2.3 - December 2, 2018
 *    Antoine P. Petitet
 *    University of Tennessee, Knoxville
 *    Innovative Computing Laboratory
 *    (C) Copyright 2000-2008 All Rights Reserved
 *
 * -- Copyright notice and Licensing terms:
 *
 * Redistribution  and  use in  source and binary forms, with or without
 * modification, are  permitted provided  that the following  conditions
 * are met:
 *
 * 1. Redistributions  of  source  code  must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce  the above copyright
 * notice, this list of conditions,  and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * 3. All  advertising  materials  mentioning  features  or  use of this
 * software must display the following acknowledgement:
 * This  product  includes  software  developed  at  the  University  of
 * Tennessee, Knoxville, Innovative Computing Laboratory.
 *
 * 4. The name of the  University,  the name of the  Laboratory,  or the
 * names  of  its  contributors  may  not  be used to endorse or promote
 * products  derived   from   this  software  without  specific  written
 * permission.
 *
 * -- Disclaimer:
 *
 * THIS  SOFTWARE  IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,  INCLUDING,  BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY
 * OR  CONTRIBUTORS  BE  LIABLE FOR ANY  DIRECT,  INDIRECT,  INCIDENTAL,
 * SPECIAL,  EXEMPLARY,  OR  CONSEQUENTIAL DAMAGES  (INCLUDING,  BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA OR PROFITS; OR BUSINESS INTERRUPTION)  HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT,  STRICT LIABILITY,  OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * ---------------------------------------------------------------------
 */
/*
 * Include files
 */
#include "hpl.h"

#ifdef STDC_HEADERS
int HPL_all_reduce
(
   void *                           BUFFER,
   const int                        COUNT,
   const HPL_T_TYPE                 DTYPE,
   const HPL_T_OP                   OP,
   MPI_Comm                         COMM
)
#else
int HPL_all_reduce
( BUFFER, COUNT, DTYPE, OP, COMM )
   void *                           BUFFER;
   const int                        COUNT;
   const HPL_T_TYPE                 DTYPE;
   const HPL_T_OP                   OP;
   MPI_Comm                         COMM;
#endif
{
/*
 * Purpose
 * =======
 *
 * HPL_all_reduce performs   a   global   reduce  operation  across  all
 * processes of a group,  leaving the results on all processes.
 *
 * Arguments
 * =========
 *
 * BUFFER  (local input/global output)   void *
 *         On entry,  BUFFER  points to  the  buffer to be combined.  On
 *         exit, this array contains the combined data and  is identical
 *         on all processes in the group.
 *
 * COUNT   (global input)                const int
 *         On entry,  COUNT  indicates the number of entries in  BUFFER.
 *         COUNT must be at least zero.
 *
 * DTYPE   (global input)                const HPL_T_TYPE
 *         On entry,  DTYPE  specifies the type of the buffer operands.
 *
 * OP      (global input)                const HPL_T_OP
 *         On entry, OP is a pointer to the local combine function.
 *
 * COMM    (global/local input)          MPI_Comm
 *         The MPI communicator identifying the process collection.
 *
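 * Example
 * =======
 *
 * A minimal calling sketch;  lmax  and  local_value  are  hypothetical
 * names, and MPI is assumed to have been initialized:
 *
 *    double lmax = local_value;
 *    (void) HPL_all_reduce( (void *)(&lmax), 1, HPL_DOUBLE, HPL_max,
 *                           MPI_COMM_WORLD );
 *
 * On return,  lmax  holds the global maximum on every process  of  the
 * communicator.
 *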
 * ---------------------------------------------------------------------
 */
/*
 * .. Local Variables ..
 */
   int                        hplerr;
/* ..
 * .. Executable Statements ..
 */
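/*
 * Combine the local buffers onto process 0,  then broadcast the result
 * from process 0 back to every process in COMM.  An error reported  by
 * the reduce phase is returned immediately.
 */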
   hplerr = HPL_reduce(   BUFFER, COUNT, DTYPE, OP, 0, COMM );
   if( hplerr != MPI_SUCCESS ) return( hplerr );
   return( HPL_broadcast( BUFFER, COUNT, DTYPE,     0, COMM ) );
/*
 * End of HPL_all_reduce
 */
}