/****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.5 -------------------------------------------------*/
/* date: 10/09/2015 ---------------------------------------------*/
/* authors: Ariful Azad, Aydin Buluc, Adam Lugowski ------------*/
/****************************************************************/
/*
 Copyright (c) 2010-2015, The Regents of the University of California

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 copies of the Software, and to permit persons to whom the Software is
 furnished to do so, subject to the following conditions:

 The above copyright notice and this permission notice shall be included in
 all copies or substantial portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */

#include <memory>
#include "CombBLAS/CommGrid.h"
#include "CombBLAS/SpDefs.h"

using namespace std;

namespace combblas {

CommGrid::CommGrid(MPI_Comm world, int nrowproc, int ncolproc): grrows(nrowproc), grcols(ncolproc)
{
	MPI_Comm_dup(world, &commWorld);
	MPI_Comm_rank(commWorld, &myrank);
	int nproc;
	MPI_Comm_size(commWorld,&nproc);

	if(grrows == 0 && grcols == 0)
	{
		grrows = (int)std::sqrt((float)nproc);
		grcols = grrows;

		if(grcols * grrows != nproc)
		{
			cerr << "This version of the Combinatorial BLAS only works on a square logical processor grid" << endl;
			MPI_Abort(MPI_COMM_WORLD,NOTSQUARE);
		}
	}
	assert((nproc == (grrows*grcols)));

	myproccol =  (int) (myrank % grcols);
	myprocrow =  (int) (myrank / grcols);

	/**
	  * Create row and column communicators (must be called collectively)
	  * C syntax: int MPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm *newcomm)
	  * C++ syntax: MPI::Intracomm MPI::Intracomm::Split(int color, int key) const
	  * Semantics: Processes with the same color are placed in the same new communicator
	  */
	MPI_Comm_split(commWorld,myprocrow, myrank,&rowWorld);
	MPI_Comm_split(commWorld,myproccol, myrank,&colWorld);
	CreateDiagWorld();

	int rowRank, colRank;
	MPI_Comm_rank(rowWorld,&rowRank);
	MPI_Comm_rank(colWorld,&colRank);
	assert( (rowRank == myproccol) );
	assert( (colRank == myprocrow) );
}
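
// Illustrative sketch (not part of the library): a typical way to build the default
// square grid after MPI has been initialized. Passing 0 for both dimensions lets the
// constructor derive a sqrt(p) x sqrt(p) layout; the variable name "grid" below is
// hypothetical.
//
//   MPI_Init(&argc, &argv);
//   std::shared_ptr<combblas::CommGrid> grid(new combblas::CommGrid(MPI_COMM_WORLD, 0, 0));
//   // Every process now belongs to one row communicator (color = its processor row)
//   // and one column communicator (color = its processor column).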

void CommGrid::CreateDiagWorld()
{
	if(grrows != grcols)
	{
		cout << "The grid is not square!" << endl;
		cout << "Returning the whole communicator as diagWorld instead of the diagonal" << endl;
		diagWorld = commWorld;
		return;
	}
	int * process_ranks = new int[grcols];
	for(int i=0; i < grcols; ++i)
	{
		process_ranks[i] = i*grcols + i;
	}
	MPI_Group group;
	MPI_Comm_group(commWorld,&group);
	MPI_Group diag_group;
	MPI_Group_incl(group,grcols, process_ranks, &diag_group); // int MPI_Group_incl(MPI_Group group, int n, int *ranks, MPI_Group *newgroup)
	MPI_Group_free(&group);
	delete [] process_ranks;

	// MPI_Comm_create returns MPI_COMM_NULL to processes that are NOT in diag_group
	MPI_Comm_create(commWorld,diag_group,&diagWorld);
	MPI_Group_free(&diag_group);
}
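
// Worked example (illustrative): on a 3x3 grid the diagonal communicator contains the
// world ranks {0, 4, 8}, i.e. i*grcols + i for i = 0, 1, 2. Off-diagonal processes end up
// with diagWorld == MPI_COMM_NULL and should test for that before using the communicator.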

bool CommGrid::OnSameProcCol(int rhsrank)
{
	return ( myproccol == ((int) (rhsrank % grcols)) );
}

bool CommGrid::OnSameProcRow(int rhsrank)
{
	return ( myprocrow == ((int) (rhsrank / grcols)) );
}

//! Return rank in the column world
int CommGrid::GetRankInProcCol(int wholerank)
{
	return ((int) (wholerank / grcols));
}

//! Return rank in the row world
int CommGrid::GetRankInProcRow(int wholerank)
{
	return ((int) (wholerank % grcols));
}
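
// Illustrative mapping (not part of the library): with grcols = 4, world rank 10 lies in
// processor row 10 / 4 = 2 and processor column 10 % 4 = 2, so it is the diagonal
// processor of its row. The object name "grid" below is hypothetical.
//
//   int r = grid.GetRankInProcRow(10);   // 10 % 4 = 2, rank within its processor row
//   int c = grid.GetRankInProcCol(10);   // 10 / 4 = 2, rank within its processor column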

//! Get the rank of the diagonal processor in that particular row
//! In the ith processor row, the diagonal processor is the ith processor within that row
int CommGrid::GetDiagOfProcRow()
{
	return myprocrow;
}

//! Get the rank of the diagonal processor in that particular col
//! In the ith processor col, the diagonal processor is the ith processor within that col
int CommGrid::GetDiagOfProcCol()
{
	return myproccol;
}

bool CommGrid::operator== (const CommGrid & rhs) const
{
	int result;
	MPI_Comm_compare(commWorld, rhs.commWorld, &result);
	if ((result != MPI_IDENT) && (result != MPI_CONGRUENT))
	{
		// A call to MPI_Comm_compare after MPI_Comm_dup returns MPI_CONGRUENT
		// MPI_CONGRUENT means the communicators have the same group members, in the same order
		return false;
	}
	return ( (grrows == rhs.grrows) && (grcols == rhs.grcols) && (myprocrow == rhs.myprocrow) && (myproccol == rhs.myproccol));
}


void CommGrid::OpenDebugFile(string prefix, ofstream & output) const
{
	stringstream ss;
	string rank;
	ss << myrank;
	ss >> rank;
	string ofilename = prefix;
	ofilename += rank;
	output.open(ofilename.c_str(), ios_base::app );
}
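
// Usage sketch (illustrative; "grid" and "localnnz" are hypothetical names): each process
// opens its own append-mode file named prefix + world rank, e.g. rank 3 with prefix
// "debug." writes to "debug.3".
//
//   std::ofstream out;
//   grid.OpenDebugFile("debug.", out);
//   out << "local nnz = " << localnnz << std::endl;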

shared_ptr<CommGrid> ProductGrid(CommGrid * gridA, CommGrid * gridB, int & innerdim, int & Aoffset, int & Boffset)
{
	if(*gridA != *gridB)
	{
		cerr << "Grids don't conform for multiplication" << endl;
		MPI_Abort(MPI_COMM_WORLD,GRIDMISMATCH);
	}
	// AA: these parameters are kept for backward compatibility only
	// they should not be used
	innerdim = gridA->grcols;
	Aoffset = (gridA->myprocrow + gridA->myproccol) % gridA->grcols;	// offsets chosen to avoid communication contention
	Boffset = (gridB->myprocrow + gridB->myproccol) % gridB->grrows;

	//MPI_Comm world = MPI_COMM_WORLD;
	//return shared_ptr<CommGrid>( new CommGrid(world, gridA->grrows, gridB->grcols) );
	return shared_ptr<CommGrid>( new CommGrid(*gridA) );
}
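
// Usage sketch (illustrative): callers, typically the distributed matrix-multiply
// routines, use ProductGrid to verify that the operand grids match and to obtain the
// legacy inner dimension and offsets. "Agrid" and "Bgrid" are hypothetical shared_ptrs.
//
//   int innerdim, Aoffset, Boffset;
//   std::shared_ptr<combblas::CommGrid> Cgrid =
//       ProductGrid(Agrid.get(), Bgrid.get(), innerdim, Aoffset, Boffset);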


} // namespace combblas