/*
 * -- High Performance Computing Linpack Benchmark (HPL)
 *    HPL - 2.3 - December 2, 2018
 *    Antoine P. Petitet
 *    University of Tennessee, Knoxville
 *    Innovative Computing Laboratory
 *    (C) Copyright 2000-2008 All Rights Reserved
 *
 * -- Copyright notice and Licensing terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions, and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 * software must display the following acknowledgement:
 * This product includes software developed at the University of
 * Tennessee, Knoxville, Innovative Computing Laboratory.
 *
 * 4. The name of the University, the name of the Laboratory, or the
 * names of its contributors may not be used to endorse or promote
 * products derived from this software without specific written
 * permission.
 *
 * -- Disclaimer:
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY
 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * ---------------------------------------------------------------------
 */
/*
 * Include files
 */
#include "hpl.h"

#ifdef STDC_HEADERS
int HPL_broadcast
(
   void *                           BUFFER,
   const int                        COUNT,
   const HPL_T_TYPE                 DTYPE,
   const int                        ROOT,
   MPI_Comm                         COMM
)
#else
int HPL_broadcast
( BUFFER, COUNT, DTYPE, ROOT, COMM )
   void *                           BUFFER;
   const int                        COUNT;
   const HPL_T_TYPE                 DTYPE;
   const int                        ROOT;
   MPI_Comm                         COMM;
#endif
{
/*
 * Purpose
 * =======
 *
 * HPL_broadcast broadcasts a message from the process with rank ROOT to
 * all processes in the group.
 *
 * Arguments
 * =========
 *
 * BUFFER  (local input/output)          void *
 *         On entry, BUFFER points to the buffer to be broadcast. On
 *         exit, this array contains the broadcast data and is identical
 *         on all processes in the group.
 *
 * COUNT   (global input)                const int
 *         On entry, COUNT indicates the number of entries in BUFFER.
 *         COUNT must be at least zero.
 *
 * DTYPE   (global input)                const HPL_T_TYPE
 *         On entry, DTYPE specifies the type of the buffer operands.
 *
 * ROOT    (global input)                const int
 *         On entry, ROOT is the coordinate of the source process.
 *
 * COMM    (global/local input)          MPI_Comm
 *         The MPI communicator identifying the process collection.
 *
 * ---------------------------------------------------------------------
 */
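/*
 * Usage sketch (illustrative only, not part of the original source):
 * broadcasting a single int from process 0 of MPI_COMM_WORLD would
 * look like
 *
 *    int nb = 64;
 *    (void) HPL_broadcast( (void *)(&nb), 1, HPL_INT, 0,
 *                          MPI_COMM_WORLD );
 *
 * where HPL_INT is the HPL_T_TYPE tag that HPL_2_MPI_TYPE translates
 * to the corresponding MPI datatype (MPI_INT).
 */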
/*
 * .. Local Variables ..
 */
   int                        hplerr=MPI_SUCCESS, ip2=1, kk, mask=1,
                              mpierr, mydist, partner, rank, size,
                              tag = MSGID_BEGIN_COLL;
   MPI_Status                 status;
/* ..
 * .. Executable Statements ..
 */
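/*
 * Quick return for an empty buffer or a single-process communicator:
 * there is nothing to communicate in either case.
 */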
   if( COUNT <= 0 ) return( MPI_SUCCESS );
   mpierr = MPI_Comm_size( COMM, &size ); if( size <= 1 ) return( mpierr );
   mpierr = MPI_Comm_rank( COMM, &rank );

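/*
 * Compute ip2, the largest power of two not exceeding size-1 (i.e. the
 * most significant bit of size-1), and mask, which has that bit and
 * all lower bits set. These drive the binomial-tree schedule below.
 */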
   kk = size - 1;
   while( kk > 1 ) { kk >>= 1; ip2 <<= 1; mask <<= 1; mask++; }
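/*
 * mydist is this process's distance from ROOT modulo size, i.e. its
 * virtual rank in a ring renumbering that places ROOT at 0.
 */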
   mydist = MModSub( rank, ROOT, size );

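/*
 * Binomial-tree broadcast over the virtual ranks: scan the bits of
 * mydist from the most significant (ip2) downwards. At each level only
 * the processes whose bits below the current one are all zero take
 * part; the member with the current bit set receives from the partner
 * with that bit cleared, which already holds the data.
 */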
   do
   {
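      /*
       * Drop the current level's bit from the mask, so that the test
       * below looks only at the bits of mydist beneath ip2.
       */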
      mask ^= ip2;
      if( ( mydist & mask ) == 0 )
      {
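         /*
          * The partner differs from this process only in the current
          * bit; MModAdd maps its virtual rank back to a real rank.
          */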
         partner = mydist ^ ip2;

         if( mydist & ip2 )
         {
            partner = MModAdd( ROOT, partner, size );
            mpierr  = MPI_Recv( BUFFER, COUNT, HPL_2_MPI_TYPE( DTYPE ),
                                partner, tag, COMM, &status );
         }
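         /*
          * Send only when the partner actually exists: size need not
          * be a power of two, so the highest virtual ranks may have
          * no counterpart at this level.
          */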
         else if( partner < size )
         {
            partner = MModAdd( ROOT, partner, size );
            mpierr  = MPI_Send( BUFFER, COUNT, HPL_2_MPI_TYPE( DTYPE ),
                                partner, tag, COMM );
         }
         if( mpierr != MPI_SUCCESS ) hplerr = mpierr;
      }
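      /*
       * Move one level down the tree: the next iteration handles the
       * next lower bit of mydist.
       */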
      ip2 >>= 1;
   } while( ip2 );

   return( hplerr );
/*
 * End of HPL_broadcast
 */
}