/*
 * Copyright (C) by Argonne National Laboratory
 * See COPYRIGHT in top-level directory
 */

/* One-Sided MPI 2-D Strided Put/Get Test
 *
 * Author: James Dinan <dinan@mcs.anl.gov>
 * Date  : December, 2010
 *
 * This code performs ITERATIONS strided put operations, each followed by a
 * get operation, into a 2-D patch of a shared array. The array has dimensions
 * [X, Y] and the subarray has dimensions [SUB_X, SUB_Y] and begins at index
 * [0, 0]. The input and output buffers are specified using an MPI indexed
 * type.
 */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mpi.h>
#include "mpitest.h"
#include "squelch.h"

#define XDIM 8
#define YDIM 1024
#define SUB_XDIM 8
#define SUB_YDIM 255
#define ITERATIONS 10

int main(int argc, char **argv)
{
    int i, j, rank, nranks, peer, bufsize, errors;
    double *win_buf, *src_buf, *dst_buf;
    MPI_Win buf_win;

    MTest_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nranks);

    bufsize = XDIM * YDIM * sizeof(double);
    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &win_buf);
    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &src_buf);
    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &dst_buf);

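    /* Fill the window with a -1.0 sentinel and the source buffer with a
     * rank-specific value, so validation can tell written elements from
     * untouched ones. */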
    for (i = 0; i < XDIM * YDIM; i++) {
        *(win_buf + i) = -1.0;
        *(src_buf + i) = 1.0 + rank;
    }

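    /* Expose the entire local buffer; a displacement unit of 1 means target
     * displacements are expressed in bytes. */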
    MPI_Win_create(win_buf, bufsize, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &buf_win);

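    /* Ranks form a ring: each rank targets its successor, so each window is
     * written by its predecessor. */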
    peer = (rank + 1) % nranks;

    /* Perform ITERATIONS strided put/get operations */

    for (i = 0; i < ITERATIONS; i++) {
        int idx_rem[SUB_YDIM];
        int blk_len[SUB_YDIM];
        MPI_Datatype src_type, dst_type;

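        /* Describe the patch with one block per row: row j is SUB_XDIM
         * contiguous doubles starting at element offset j * XDIM. */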
        for (j = 0; j < SUB_YDIM; j++) {
            idx_rem[j] = j * XDIM;
            blk_len[j] = SUB_XDIM;
        }

        MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &src_type);
        MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &dst_type);

        MPI_Type_commit(&src_type);
        MPI_Type_commit(&dst_type);

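        /* Exclusive passive-target epoch: the put is complete at the target
         * once MPI_Win_unlock returns. */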
        MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
        MPI_Put(src_buf, 1, src_type, peer, 0, 1, dst_type, buf_win);
        MPI_Win_unlock(peer, buf_win);

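        /* Read the patch back; src_type is reused as the origin datatype,
         * which is safe because both types have identical layouts. */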
        MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
        MPI_Get(dst_buf, 1, src_type, peer, 0, 1, dst_type, buf_win);
        MPI_Win_unlock(peer, buf_win);

        MPI_Type_free(&src_type);
        MPI_Type_free(&dst_type);
    }

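    /* Ensure the predecessor has finished writing this rank's window before
     * validating its contents. */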
    MPI_Barrier(MPI_COMM_WORLD);

    /* Verify that the results are correct */

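    /* The patch was written by the predecessor in the ring, so it should hold
     * 1.0 + ((rank + nranks - 1) % nranks); all other elements must still be
     * the -1.0 sentinel. */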
    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);
    errors = 0;
    for (i = 0; i < SUB_XDIM; i++) {
        for (j = 0; j < SUB_YDIM; j++) {
            const double actual = *(win_buf + i + j * XDIM);
            const double expected = (1.0 + ((rank + nranks - 1) % nranks));
            if (fabs(actual - expected) > 1.0e-10) {
                SQUELCH(printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
                               rank, j, i, expected, actual););
                errors++;
                fflush(stdout);
            }
        }
    }
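    /* Elements to the right of the patch (i >= SUB_XDIM) must be untouched. */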
    for (i = SUB_XDIM; i < XDIM; i++) {
        for (j = 0; j < SUB_YDIM; j++) {
            const double actual = *(win_buf + i + j * XDIM);
            const double expected = -1.0;
            if (fabs(actual - expected) > 1.0e-10) {
                SQUELCH(printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
                               rank, j, i, expected, actual););
                errors++;
                fflush(stdout);
            }
        }
    }
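    /* Rows below the patch (j >= SUB_YDIM) must be untouched. */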
    for (i = 0; i < XDIM; i++) {
        for (j = SUB_YDIM; j < YDIM; j++) {
            const double actual = *(win_buf + i + j * XDIM);
            const double expected = -1.0;
            if (fabs(actual - expected) > 1.0e-10) {
                SQUELCH(printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
                               rank, j, i, expected, actual););
                errors++;
                fflush(stdout);
            }
        }
    }
    MPI_Win_unlock(rank, buf_win);

    MPI_Win_free(&buf_win);
    MPI_Free_mem(win_buf);
    MPI_Free_mem(src_buf);
    MPI_Free_mem(dst_buf);

    MTest_Finalize(errors);
    return MTestReturnValue(errors);
}