/****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.6 -------------------------------------------------*/
/* date: 6/15/2017 ---------------------------------------------*/
/* authors: Ariful Azad, Aydin Buluc  --------------------------*/
/****************************************************************/
/*
 Copyright (c) 2010-2017, The Regents of the University of California

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 copies of the Software, and to permit persons to whom the Software is
 furnished to do so, subject to the following conditions:

 The above copyright notice and this permission notice shall be included in
 all copies or substantial portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */

#include <numeric>
#include "DenseParMat.h"
#include "MPIType.h"
#include "Operations.h"

namespace combblas {

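/**
 * Reduce the dense matrix along one dimension into a fully distributed vector.
 * Column produces a vector whose length is the global number of columns (each entry is
 * the reduction of one column); Row produces a vector whose length is the global number
 * of rows. Entries are folded with __binary_op, seeded with identity; the matching MPI
 * operator is obtained from MPIOp<_BinaryOperation, NT>.
 *
 * Illustrative usage (assuming a DenseParMat<int64_t, double> named A):
 *   FullyDistVec<int64_t, double> colsums = A.Reduce(Column, std::plus<double>(), 0.0);
 */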
template <class IT, class NT>
template <typename _BinaryOperation>
FullyDistVec< IT,NT > DenseParMat<IT,NT>::Reduce(Dim dim, _BinaryOperation __binary_op, NT identity) const
{

	switch(dim)
	{
		case Column:	// pack along the columns, result is a vector of size (global) n
		{
			// we can use parvec's grid as long as the process grid is square (currently a CombBLAS requirement)

			int colneighs = commGrid->GetGridRows();	// including oneself
			int colrank = commGrid->GetRankInProcCol();

			IT * loclens = new IT[colneighs];
			IT * lensums = new IT[colneighs+1]();	// begin/end points of local lengths

			IT n_perproc = n / colneighs;	// length on a typical processor
			if(colrank == colneighs-1)
				loclens[colrank] = n - (n_perproc*colrank);
			else
				loclens[colrank] = n_perproc;

			MPI_Allgather(MPI_IN_PLACE, 0, MPIType<IT>(), loclens, 1, MPIType<IT>(), commGrid->GetColWorld());
			std::partial_sum(loclens, loclens+colneighs, lensums+1);	// loclens and lensums are different, but both would fit in 32-bits

			std::vector<NT> trarr(loclens[colrank]);
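			// Reduce the local m-by-n block column by column: sendbuf[j] accumulates local
			// column j with __binary_op, seeded with identity.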
			NT * sendbuf = new NT[n];
			for(int j=0; j < n; ++j)
			{
				sendbuf[j] = identity;
				for(int i=0; i < m; ++i)
				{
					sendbuf[j] = __binary_op(array[i][j], sendbuf[j]);
				}
			}

			// The MPI_REDUCE_SCATTER routine is functionally equivalent to:
			// an MPI_REDUCE collective operation with count equal to the sum of loclens[i],
			// followed by MPI_SCATTERV with sendcounts equal to loclens as well.
			MPI_Reduce_scatter(sendbuf, trarr.data(), loclens, MPIType<NT>(), MPIOp<_BinaryOperation, NT>::op(), commGrid->GetColWorld());

			DeleteAll(sendbuf, loclens, lensums);

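			// The reduced segments now live along each process column; exchange them with the
			// transpose (complement) processor so that the layout matches FullyDistVec's distribution.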
			IT reallen;	// Now we have to transpose the vector
			IT trlen = trarr.size();
			int diagneigh = commGrid->GetComplementRank();
			MPI_Status status;
			MPI_Sendrecv(&trlen, 1, MPIType<IT>(), diagneigh, TRNNZ, &reallen, 1, MPIType<IT>(), diagneigh, TRNNZ, commGrid->GetWorld(), &status);
			IT glncols = gcols();
			FullyDistVec<IT,NT> parvec(commGrid, glncols, identity);

			assert((parvec.arr.size() == reallen));
			MPI_Sendrecv(trarr.data(), trlen, MPIType<NT>(), diagneigh, TRX, parvec.arr.data(), reallen, MPIType<NT>(), diagneigh, TRX, commGrid->GetWorld(), &status);

			return parvec;
			break;
		}
		case Row:	// pack along the rows, result is a vector of size m
		{
			IT glnrows = grows();
			FullyDistVec<IT,NT> parvec(commGrid, glnrows, identity);

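			// Reduce each local row to a single value, then combine the partial results across
			// the process row and scatter them according to the local vector lengths of parvec.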
			NT * sendbuf = new NT[m];
			for(int i=0; i < m; ++i)
			{
				sendbuf[i] = std::accumulate( array[i], array[i]+n, identity, __binary_op);
			}
			NT * recvbuf = parvec.arr.data();

			int rowneighs = commGrid->GetGridCols();
			int rowrank = commGrid->GetRankInProcRow();
			IT * recvcounts = new IT[rowneighs];
			recvcounts[rowrank] = parvec.MyLocLength();	// local vector lengths are the ultimate receive counts
			MPI_Allgather(MPI_IN_PLACE, 0, MPIType<IT>(), recvcounts, 1, MPIType<IT>(), commGrid->GetRowWorld());

			// The MPI_REDUCE_SCATTER routine is functionally equivalent to:
			// an MPI_REDUCE collective operation with count equal to the sum of recvcounts[i],
			// followed by MPI_SCATTERV with sendcounts equal to recvcounts.
			MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, MPIType<NT>(), MPIOp<_BinaryOperation, NT>::op(), commGrid->GetRowWorld());
			delete [] sendbuf;
			delete [] recvcounts;
			return parvec;
			break;
		}
		default:
		{
			std::cout << "Unknown reduction dimension, returning empty vector" << std::endl;
			return FullyDistVec<IT,NT>(commGrid);
			break;
		}
	}
}

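/**
 * Add a sparse matrix into this dense matrix elementwise.
 * Both operands must live on the same process grid; otherwise an error is printed
 * and the program aborts with GRIDMISMATCH.
 */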
template <class IT, class NT>
template <typename DER>
DenseParMat< IT,NT > & DenseParMat<IT,NT>::operator+=(const SpParMat< IT,NT,DER > & rhs)	// add a sparse matrix
{
	if(*commGrid == *rhs.commGrid)
	{
		(rhs.spSeq)->UpdateDense(array, std::plus<NT>());
	}
	else
	{
		std::cout << "Grids are not comparable, cannot perform elementwise addition" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
	}
	return *this;
}

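/**
 * Deep-copy assignment: releases the current local array, copies the dimensions and the
 * local dense block from rhs, and clones its communication grid.
 */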
template <class IT, class NT>
DenseParMat< IT,NT > & DenseParMat<IT,NT>::operator=(const DenseParMat< IT,NT > & rhs)	// assignment operator
{
	if(this != &rhs)
	{
		if(array != NULL)
			SpHelper::deallocate2D(array, m);
		array = NULL;	// avoid a dangling pointer in case rhs carries no array

		m = rhs.m;
		n = rhs.n;
		if(rhs.array != NULL)
		{
			array = SpHelper::allocate2D<NT>(m, n);
			for(int i=0; i < m; ++i)
				std::copy(rhs.array[i], rhs.array[i]+n, array[i]);	// copy from rhs into this
		}
		commGrid.reset(new CommGrid(*(rhs.commGrid)));
	}
	return *this;
}

}