1 #include <petsc/private/petscimpl.h> /*I "petscsys.h" I*/
2 /* ---------------------------------------------------------------- */
3 /*
4 A simple way to manage tags inside a communicator.
5
6 It uses the attributes to determine if a new communicator
7 is needed and to store the available tags.
8
9 */
10
11
12 /*@C
13 PetscObjectGetNewTag - Gets a unique new tag from a PETSc object. All
14 processors that share the object MUST call this routine EXACTLY the same
15 number of times. This tag should only be used with the current objects
16 communicator; do NOT use it with any other MPI communicator.
17
18 Collective on PetscObject
19
20 Input Parameter:
21 . obj - the PETSc object; this must be cast with a (PetscObject), for example,
22 PetscObjectGetNewTag((PetscObject)mat,&tag);
23
24 Output Parameter:
25 . tag - the new tag
26
27 Level: developer
28
29 .seealso: PetscCommGetNewTag()
30 @*/
PetscObjectGetNewTag(PetscObject obj,PetscMPIInt * tag)31 PetscErrorCode PetscObjectGetNewTag(PetscObject obj,PetscMPIInt *tag)
32 {
33 PetscErrorCode ierr;
34
35 PetscFunctionBegin;
36 ierr = PetscCommGetNewTag(obj->comm,tag);CHKERRQ(ierr);
37 PetscFunctionReturn(0);
38 }
39
40 /*@
41 PetscCommGetNewTag - Gets a unique new tag from a PETSc communicator. All
42 processors that share the communicator MUST call this routine EXACTLY the same
43 number of times. This tag should only be used with the current objects
44 communicator; do NOT use it with any other MPI communicator.
45
46 Collective
47
48 Input Parameter:
49 . comm - the MPI communicator
50
51 Output Parameter:
52 . tag - the new tag
53
54 Level: developer
55
56 .seealso: PetscObjectGetNewTag(), PetscCommDuplicate()
57 @*/
PetscCommGetNewTag(MPI_Comm comm,PetscMPIInt * tag)58 PetscErrorCode PetscCommGetNewTag(MPI_Comm comm,PetscMPIInt *tag)
59 {
60 PetscErrorCode ierr;
61 PetscCommCounter *counter;
62 PetscMPIInt *maxval,flg;
63
64 PetscFunctionBegin;
65 PetscValidIntPointer(tag,2);
66
67 ierr = MPI_Comm_get_attr(comm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
68 if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Bad MPI communicator supplied; must be a PETSc communicator");
69
70 if (counter->tag < 1) {
71 ierr = PetscInfo1(NULL,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);CHKERRQ(ierr);
72 ierr = MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
73 if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Comm_get_attr() is not returning a MPI_TAG_UB");
74 counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
75 }
76
77 *tag = counter->tag--;
78 if (PetscDefined(USE_DEBUG)) {
79 /*
80 Hanging here means that some processes have called PetscCommGetNewTag() and others have not.
81 */
82 ierr = MPI_Barrier(comm);CHKERRQ(ierr);
83 }
84 PetscFunctionReturn(0);
85 }
86
/*@C
   PetscCommDuplicate - Duplicates the communicator only if it is not already a PETSc communicator.

   Collective

   Input Parameters:
.  comm_in - Input communicator

   Output Parameters:
+  comm_out - Output communicator. May be comm_in.
-  first_tag - Tag available that has not already been used with this communicator (you may
               pass in NULL if you do not need a tag)

   Notes:
   PETSc communicators are just regular MPI communicators that keep track of which
   tags have been used to prevent tag conflict. If you pass a non-PETSc communicator into
   a PETSc creation routine it will attach a private communicator for use in the objects communications.
   The internal MPI_Comm is used to perform all the MPI calls for PETSc, the outer MPI_Comm is a user
   level MPI_Comm that may be performing communication for the user or other library and so IS NOT used by PETSc.

   Level: developer

.seealso: PetscObjectGetNewTag(), PetscCommGetNewTag(), PetscCommDestroy()
@*/
PetscErrorCode PetscCommDuplicate(MPI_Comm comm_in,MPI_Comm *comm_out,PetscMPIInt *first_tag)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      *maxval,flg;

  PetscFunctionBegin;
  /* Serialize access to the shared counter/attribute state */
  ierr = PetscSpinlockLock(&PetscCommSpinLock);CHKERRQ(ierr);
  /* A PETSc communicator is recognized by the presence of the Petsc_Counter_keyval attribute */
  ierr = MPI_Comm_get_attr(comm_in,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);

  if (!flg) { /* this is NOT a PETSc comm */
    /* MPI attributes are stored as void*; MPI_Comm need not be a pointer type, so a union is used to convert */
    union {MPI_Comm comm; void *ptr;} ucomm;
    /* check if this communicator has a PETSc communicator imbedded in it */
    ierr = MPI_Comm_get_attr(comm_in,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (!flg) {
      /* This communicator is not yet known to this system, so we duplicate it and make an internal communicator */
      ierr = MPI_Comm_dup(comm_in,comm_out);CHKERRQ(ierr);
      ierr = MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Comm_get_attr() is not returning a MPI_TAG_UB");
      ierr = PetscNew(&counter);CHKERRQ(ierr); /* all fields of counter are zero'ed */
      counter->tag = *maxval; /* tags are handed out counting down from MPI_TAG_UB */
      ierr = MPI_Comm_set_attr(*comm_out,Petsc_Counter_keyval,counter);CHKERRQ(ierr);
      ierr = PetscInfo3(NULL,"Duplicating a communicator %ld %ld max tags = %d\n",(long)comm_in,(long)*comm_out,*maxval);CHKERRQ(ierr);

      /* save PETSc communicator inside user communicator, so we can get it next time */
      ucomm.comm = *comm_out; /* ONLY the comm part of the union is significant. */
      ierr = MPI_Comm_set_attr(comm_in,Petsc_InnerComm_keyval,ucomm.ptr);CHKERRQ(ierr);
      /* also record the outer (user) communicator on the inner one so PetscCommDestroy() can unlink them */
      ucomm.comm = comm_in;
      ierr = MPI_Comm_set_attr(*comm_out,Petsc_OuterComm_keyval,ucomm.ptr);CHKERRQ(ierr);
    } else {
      *comm_out = ucomm.comm;
      /* pull out the inner MPI_Comm and hand it back to the caller */
      ierr = MPI_Comm_get_attr(*comm_out,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Inner PETSc communicator does not have its tag/name counter attribute set");
      ierr = PetscInfo2(NULL,"Using internal PETSc communicator %ld %ld\n",(long)comm_in,(long)*comm_out);CHKERRQ(ierr);
    }
  } else *comm_out = comm_in; /* already a PETSc comm: hand it straight back */

  if (PetscDefined(USE_DEBUG)) {
    /*
       Hanging here means that some processes have called PetscCommDuplicate() and others have not.
       This likley means that a subset of processes in a MPI_Comm have attempted to create a PetscObject!
       ALL processes that share a communicator MUST shared objects created from that communicator.
    */
    ierr = MPI_Barrier(comm_in);CHKERRQ(ierr);
  }

  /* Out of tags: restart near MPI_TAG_UB (same recycling scheme as PetscCommGetNewTag()) */
  if (counter->tag < 1) {
    ierr = PetscInfo1(NULL,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);CHKERRQ(ierr);
    ierr = MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Comm_get_attr() is not returning a MPI_TAG_UB");
    counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
  }

  if (first_tag) *first_tag = counter->tag--;

  counter->refcount++; /* number of references to this comm */
  ierr = PetscSpinlockUnlock(&PetscCommSpinLock);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
170
171 /*@C
172 PetscCommDestroy - Frees communicator. Use in conjunction with PetscCommDuplicate().
173
174 Collective
175
176 Input Parameter:
177 . comm - the communicator to free
178
179 Level: developer
180
181 .seealso: PetscCommDuplicate()
182 @*/
PetscCommDestroy(MPI_Comm * comm)183 PetscErrorCode PetscCommDestroy(MPI_Comm *comm)
184 {
185 PetscErrorCode ierr;
186 PetscCommCounter *counter;
187 PetscMPIInt flg;
188 MPI_Comm icomm = *comm,ocomm;
189 union {MPI_Comm comm; void *ptr;} ucomm;
190
191 PetscFunctionBegin;
192 if (*comm == MPI_COMM_NULL) PetscFunctionReturn(0);
193 ierr = PetscSpinlockLock(&PetscCommSpinLock);CHKERRQ(ierr);
194 ierr = MPI_Comm_get_attr(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
195 if (!flg) { /* not a PETSc comm, check if it has an inner comm */
196 ierr = MPI_Comm_get_attr(icomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
197 if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"MPI_Comm does not have tag/name counter nor does it have inner MPI_Comm");
198 icomm = ucomm.comm;
199 ierr = MPI_Comm_get_attr(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
200 if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");
201 }
202
203 counter->refcount--;
204
205 if (!counter->refcount) {
206 /* if MPI_Comm has outer comm then remove reference to inner MPI_Comm from outer MPI_Comm */
207 ierr = MPI_Comm_get_attr(icomm,Petsc_OuterComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
208 if (flg) {
209 ocomm = ucomm.comm;
210 ierr = MPI_Comm_get_attr(ocomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
211 if (flg) {
212 ierr = MPI_Comm_delete_attr(ocomm,Petsc_InnerComm_keyval);CHKERRQ(ierr);
213 } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Outer MPI_Comm %ld does not have expected reference to inner comm %d, problem with corrupted memory",(long int)ocomm,(long int)icomm);
214 }
215
216 ierr = PetscInfo1(NULL,"Deleting PETSc MPI_Comm %ld\n",(long)icomm);CHKERRQ(ierr);
217 ierr = MPI_Comm_free(&icomm);CHKERRQ(ierr);
218 }
219 *comm = MPI_COMM_NULL;
220 ierr = PetscSpinlockUnlock(&PetscCommSpinLock);CHKERRQ(ierr);
221 PetscFunctionReturn(0);
222 }
223
224 /*@C
225 PetscObjectsListGetGlobalNumbering - computes a global numbering
226 of PetscObjects living on subcommunicators of a given communicator.
227
228
229 Collective.
230
231 Input Parameters:
232 + comm - MPI_Comm
233 . len - local length of objlist
234 - objlist - a list of PETSc objects living on subcomms of comm and containing this comm rank
235 (subcomm ordering is assumed to be deadlock-free)
236
237 Output Parameters:
238 + count - global number of distinct subcommunicators on objlist (may be > len)
239 - numbering - global numbers of objlist entries (allocated by user)
240
241
242 Level: developer
243
244 @*/
PetscObjectsListGetGlobalNumbering(MPI_Comm comm,PetscInt len,PetscObject * objlist,PetscInt * count,PetscInt * numbering)245 PetscErrorCode PetscObjectsListGetGlobalNumbering(MPI_Comm comm, PetscInt len, PetscObject *objlist, PetscInt *count, PetscInt *numbering)
246 {
247 PetscErrorCode ierr;
248 PetscInt i, roots, offset;
249 PetscMPIInt size, rank;
250
251 PetscFunctionBegin;
252 PetscValidPointer(objlist,3);
253 if (!count && !numbering) PetscFunctionReturn(0);
254
255 ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
256 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
257 roots = 0;
258 for (i = 0; i < len; ++i) {
259 PetscMPIInt srank;
260 ierr = MPI_Comm_rank(objlist[i]->comm, &srank);CHKERRQ(ierr);
261 /* Am I the root of the i-th subcomm? */
262 if (!srank) ++roots;
263 }
264 if (count) {
265 /* Obtain the sum of all roots -- the global number of distinct subcomms. */
266 ierr = MPIU_Allreduce(&roots,count,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
267 }
268 if (numbering){
269 /* Introduce a global numbering for subcomms, initially known only by subcomm roots. */
270 /*
271 At each subcomm root number all of the subcomms it owns locally
272 and make it global by calculating the shift among all of the roots.
273 The roots are ordered using the comm ordering.
274 */
275 ierr = MPI_Scan(&roots,&offset,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
276 offset -= roots;
277 /* Now we are ready to broadcast global subcomm numbers within each subcomm.*/
278 /*
279 This is where the assumption of a deadlock-free ordering of the subcomms is assumed:
280 broadcast is collective on the subcomm.
281 */
282 roots = 0;
283 for (i = 0; i < len; ++i) {
284 PetscMPIInt srank;
285 numbering[i] = offset + roots; /* only meaningful if !srank. */
286
287 ierr = MPI_Comm_rank(objlist[i]->comm, &srank);CHKERRQ(ierr);
288 ierr = MPI_Bcast(numbering+i,1,MPIU_INT,0,objlist[i]->comm);CHKERRQ(ierr);
289 if (!srank) ++roots;
290 }
291 }
292 PetscFunctionReturn(0);
293 }
294
295