/****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.5 -------------------------------------------------*/
/* date: 10/09/2015 ---------------------------------------------*/
/* authors: Ariful Azad, Aydin Buluc, Adam Lugowski ------------*/
/****************************************************************/
/*
 Copyright (c) 2010-2015, The Regents of the University of California

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 copies of the Software, and to permit persons to whom the Software is
 furnished to do so, subject to the following conditions:

 The above copyright notice and this permission notice shall be included in
 all copies or substantial portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 */

#ifndef _COMM_GRID_H_
#define _COMM_GRID_H_

#include <iostream>
#include <cmath>
#include <cassert>
#include <mpi.h>
#include <sstream>
#include <string>
#include <fstream>
#include <memory>	// std::shared_ptr, used by the ProductGrid friend declaration
#include <stdint.h>
#include "MPIType.h"

namespace combblas {

class CommGrid
{
public:
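	// Construct an (nrowproc x ncolproc) process grid over `world`.
	// The definition lives in CommGrid.cpp; by CombBLAS convention,
	// passing 0 for both dimensions lets the library choose the grid
	// shape itself (treat that convention as an assumption here).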
	CommGrid(MPI_Comm world, int nrowproc, int ncolproc);

	~CommGrid()
	{
		MPI_Comm_free(&commWorld);
		MPI_Comm_free(&rowWorld);
		MPI_Comm_free(&colWorld);
		if(diagWorld != MPI_COMM_NULL) MPI_Comm_free(&diagWorld);
	}
	CommGrid (const CommGrid & rhs): grrows(rhs.grrows), grcols(rhs.grcols),
			myprocrow(rhs.myprocrow), myproccol(rhs.myproccol), myrank(rhs.myrank) // copy constructor
	{
		MPI_Comm_dup(rhs.commWorld, &commWorld);
		MPI_Comm_dup(rhs.rowWorld, &rowWorld);
		MPI_Comm_dup(rhs.colWorld, &colWorld);

		// don't use the shortcut ternary ? operator, C++ syntax fails as
		// mpich implements MPI::COMM_NULL of different type than MPI::IntraComm
		if(rhs.diagWorld == MPI_COMM_NULL)
			diagWorld = MPI_COMM_NULL;
		else
			MPI_Comm_dup(rhs.diagWorld, &diagWorld);
	}

	CommGrid & operator=(const CommGrid & rhs)	// assignment operator
	{
		if(this != &rhs)
		{
			MPI_Comm_free(&commWorld);
			MPI_Comm_free(&rowWorld);
			MPI_Comm_free(&colWorld);
			// also free the old diagonal communicator (non-null only on
			// diagonal processes), otherwise it would leak on reassignment
			if(diagWorld != MPI_COMM_NULL)	MPI_Comm_free(&diagWorld);

			grrows = rhs.grrows;
			grcols = rhs.grcols;
			myrank = rhs.myrank;
			myprocrow = rhs.myprocrow;
			myproccol = rhs.myproccol;

			MPI_Comm_dup(rhs.commWorld, &commWorld);
			MPI_Comm_dup(rhs.rowWorld, &rowWorld);
			MPI_Comm_dup(rhs.colWorld, &colWorld);

			if(rhs.diagWorld == MPI_COMM_NULL)	diagWorld = MPI_COMM_NULL;
			else	MPI_Comm_dup(rhs.diagWorld, &diagWorld);
		}
		return *this;
	}
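	// Create diagWorld, the sub-communicator connecting the diagonal
	// processes P(i,i); judging from the null checks above, it stays
	// MPI_COMM_NULL on off-diagonal processes (definition in CommGrid.cpp).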
	void CreateDiagWorld();

	bool operator== (const CommGrid & rhs) const;
	bool operator!= (const CommGrid & rhs) const
	{
		return (! (*this == rhs));
	}
	bool OnSameProcCol( int rhsrank );
	bool OnSameProcRow( int rhsrank );

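	// Ranks are laid out in row-major order: process P(i,j) on a
	// (grrows x grcols) grid has world rank i * grcols + j. For example,
	// on a 2 x 3 grid, P(1,2) has rank 1*3 + 2 = 5, and the diagonal
	// process P(1,1) has rank 1*3 + 1 = 4.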
	int GetRank(int rowrank, int colrank) { return rowrank * grcols + colrank; }
	int GetRank(int diagrank) { return diagrank * grcols + diagrank; }
	int GetRank() { return myrank; }
	int GetRankInProcRow() { return myproccol; }
	int GetRankInProcCol() { return myprocrow; }
	int GetDiagRank()
	{
		int rank;
		MPI_Comm_rank(diagWorld, &rank);
		return rank;
	}

	int GetRankInProcRow(int wholerank);
	int GetRankInProcCol(int wholerank);

	int GetDiagOfProcRow();
	int GetDiagOfProcCol();

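	// With the row-major layout above, grcols * myproccol + myprocrow is
	// the rank of P(myproccol, myprocrow), i.e. the transpose partner of
	// this process; note that this pairing is only well defined for every
	// process when the grid is square (grrows == grcols).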
	int GetComplementRank()	// For P(i,j), get rank of P(j,i)
	{
		return ((grcols * myproccol) + myprocrow);
	}

	MPI_Comm & GetWorld() { return commWorld; }
	MPI_Comm & GetRowWorld() { return rowWorld; }
	MPI_Comm & GetColWorld() { return colWorld; }
	MPI_Comm & GetDiagWorld() { return diagWorld; }
	MPI_Comm GetWorld() const { return commWorld; }
	MPI_Comm GetRowWorld() const { return rowWorld; }
	MPI_Comm GetColWorld() const { return colWorld; }
	MPI_Comm GetDiagWorld() const { return diagWorld; }
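	// A minimal usage sketch of the sub-communicators (hypothetical
	// variable names): each process row can broadcast from its diagonal
	// member over rowWorld,
	//
	//	int vals[4];
	//	int rowroot = grid.GetDiagOfProcRow();	// diagonal's rank in this row
	//	MPI_Bcast(vals, 4, MPI_INT, rowroot, grid.GetRowWorld());
	//
	// and colWorld serves per-column collectives the same way.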

	int GetGridRows() { return grrows; }
	int GetGridCols() { return grcols; }
	int GetSize() { return grrows * grcols; }
	int GetDiagSize()
	{
		int size;
		MPI_Comm_size(diagWorld, &size);
		return size;
	}

	void OpenDebugFile(std::string prefix, std::ofstream & output) const;

	friend std::shared_ptr<CommGrid> ProductGrid(CommGrid * gridA, CommGrid * gridB, int & innerdim, int & Aoffset, int & Boffset);
private:
	// A "normal" MPI-1 communicator is an intracommunicator; MPI::COMM_WORLD is also an MPI::Intracomm object
	MPI_Comm commWorld, rowWorld, colWorld, diagWorld;

	// Processor grid is (grrows x grcols)
	int grrows, grcols;
	int myprocrow;
	int myproccol;
	int myrank;

	template <class IT, class NT, class DER>
	friend class SpParMat;

	template <class IT, class NT>
	friend class FullyDistSpVec;
};
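
// Example usage (a minimal, hypothetical sketch; the constructor is assumed
// to duplicate `world` and to accept any rows-x-columns factorization that
// matches the communicator size):
//
//	MPI_Init(&argc, &argv);
//	{
//		combblas::CommGrid grid(MPI_COMM_WORLD, 2, 3);	// 2 x 3 grid over 6 processes
//		int r = grid.GetRank();				// row-major world rank
//		bool samecol = grid.OnSameProcCol(0);		// on rank 0's grid column?
//	}	// ~CommGrid frees the duplicated communicators here
//	MPI_Finalize();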

}

#endif