1 /*
2 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
3 * University Research and Technology
4 * Corporation. All rights reserved.
5 * Copyright (c) 2004-2013 The University of Tennessee and The University
6 * of Tennessee Research Foundation. All rights
7 * reserved.
8 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
9 * University of Stuttgart. All rights reserved.
10 * Copyright (c) 2004-2005 The Regents of the University of California.
11 * All rights reserved.
12 * Copyright (c) 2012-2013 Inria. All rights reserved.
13 * Copyright (c) 2014 Cisco Systems, Inc. All rights reserved.
14 * Copyright (c) 2014-2015 Research Organization for Information Science
15 * and Technology (RIST). All rights reserved.
16 * $COPYRIGHT$
17 *
18 * Additional copyrights may follow
19 *
20 * $HEADER$
21 */
22
23 #include "ompi_config.h"
24 #include "ompi/mca/topo/base/base.h"
25 #include "ompi/mca/topo/topo.h"
26
27 /*
28 *
29 * function - makes a new communicator to which topology information
30 * has been attached
31 *
32 * @param comm_old input communicator without topology (handle)
33 * @param nnodes number of nodes in graph (integer)
34 * @param index array of integers describing node degrees (see below)
35 * @param edges array of integers describing graph edges (see below)
36 * @param reorder ranking may be reordered (true) or not (false) (logical)
37 * @param comm_graph communicator with graph topology added (handle)
38 *
39 * @retval MPI_SUCCESS
40 * @retval MPI_ERR_OUT_OF_RESOURCE
41 */
42
/*
 * Back-end for MPI_Graph_create: build a new communicator from old_comm
 * with the supplied graph topology attached.
 *
 * Ownership notes (as implemented below):
 *  - 'graph' is an OBJ-refcounted object.  On every error path taken
 *    before it is hung off new_comm->c_topo it is OBJ_RELEASE'd here.
 *  - 'topo_procs' is passed to ompi_comm_enable() and is not freed here
 *    on success — presumably ompi_comm_enable() takes ownership; it is
 *    only freed on the failure paths (TODO confirm against its contract).
 *
 * Returns MPI_SUCCESS, MPI_ERR_DIMS (more graph nodes than processes),
 * OMPI_ERR_OUT_OF_RESOURCE, or whatever ompi_comm_enable() reports.
 */
int mca_topo_base_graph_create(mca_topo_base_module_t *topo,
                               ompi_communicator_t* old_comm,
                               int nnodes,
                               const int *index,
                               const int *edges,
                               bool reorder,
                               ompi_communicator_t** comm_topo)
{
    ompi_communicator_t *new_comm;
    int new_rank, num_procs, ret, i;
    ompi_proc_t **topo_procs = NULL;
    mca_topo_base_comm_graph_2_2_0_t* graph;

    num_procs = old_comm->c_local_group->grp_proc_count;
    new_rank = old_comm->c_local_group->grp_my_rank;
    assert(topo->type == OMPI_COMM_GRAPH);

    /* The graph cannot name more nodes than old_comm has processes. */
    if( num_procs < nnodes ) {
        return MPI_ERR_DIMS;
    }
    /* Only the first nnodes ranks participate in the new communicator. */
    if( num_procs > nnodes ) {
        num_procs = nnodes;
    }
    /* Ranks beyond the graph are excluded: they end up with
       MPI_COMM_NULL (see the MPI_UNDEFINED handling at the end). */
    if( new_rank > (nnodes - 1) ) {
        new_rank = MPI_UNDEFINED;
        num_procs = 0;
        nnodes = 0;
    }

    graph = OBJ_NEW(mca_topo_base_comm_graph_2_2_0_t);
    if( NULL == graph ) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    graph->nnodes = nnodes;

    /* Don't do any of the other initialization if we're not supposed
       to be part of the new communicator (because nnodes has been
       reset to 0, making things like index[nnodes-1] be junk).

       JMS: This should really be refactored to use
       comm_create_group(), because ompi_comm_allocate() still
       complains about 0-byte mallocs in debug builds for 0-member
       groups. */
    if (MPI_UNDEFINED != new_rank) {
        /* Private copy of the degree array, owned by 'graph'. */
        graph->index = (int*)malloc(sizeof(int) * nnodes);
        if (NULL == graph->index) {
            OBJ_RELEASE(graph);
            return OMPI_ERR_OUT_OF_RESOURCE;
        }
        memcpy(graph->index, index, nnodes * sizeof(int));

        /* Graph communicator; copy the right data to the common information */
        /* index[nnodes-1] is the total edge count (MPI graph convention). */
        graph->edges = (int*)malloc(sizeof(int) * index[nnodes-1]);
        if (NULL == graph->edges) {
            OBJ_RELEASE(graph);
            return OMPI_ERR_OUT_OF_RESOURCE;
        }
        memcpy(graph->edges, edges, index[nnodes-1] * sizeof(int));

        /* Process list for the new communicator: the first num_procs
           members of old_comm's local group. */
        topo_procs = (ompi_proc_t**)malloc(num_procs * sizeof(ompi_proc_t *));
        if (NULL == topo_procs) {
            OBJ_RELEASE(graph);
            return OMPI_ERR_OUT_OF_RESOURCE;
        }
        if(OMPI_GROUP_IS_DENSE(old_comm->c_local_group)) {
            /* Dense group: proc pointers are contiguous, bulk-copy them. */
            memcpy(topo_procs,
                   old_comm->c_local_group->grp_proc_pointers,
                   num_procs * sizeof(ompi_proc_t *));
        } else {
            /* Sparse group: resolve each proc pointer individually. */
            for(i = 0 ; i < num_procs; i++) {
                topo_procs[i] = ompi_group_peer_lookup(old_comm->c_local_group,i);
            }
        }
    }

    /* allocate a new communicator */
    new_comm = ompi_comm_allocate(nnodes, 0);
    if (NULL == new_comm) {
        free(topo_procs);
        OBJ_RELEASE(graph);
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    ret = ompi_comm_enable(old_comm, new_comm,
                           new_rank, num_procs, topo_procs);
    if (OMPI_SUCCESS != ret) {
        free(topo_procs);
        OBJ_RELEASE(graph);
        /* ompi_comm_enable() may have invalidated new_comm on failure;
           only clean it up if it is still a live handle. */
        if (MPI_COMM_NULL != new_comm) {
            /* Detach the topology before freeing so ompi_comm_free()
               does not try to release 'graph' a second time. */
            new_comm->c_topo = NULL;
            new_comm->c_flags &= ~OMPI_COMM_GRAPH;
            ompi_comm_free (&new_comm);
        }
        return ret;
    }

    /* Attach the topology; from here on the communicator owns 'graph'. */
    new_comm->c_topo = topo;
    new_comm->c_topo->mtc.graph = graph;
    new_comm->c_flags |= OMPI_COMM_GRAPH;
    new_comm->c_topo->reorder = reorder;
    *comm_topo = new_comm;

    /* Excluded ranks: release the (empty) communicator — which also
       releases the attached topology — and hand back MPI_COMM_NULL. */
    if( MPI_UNDEFINED == new_rank ) {
        ompi_comm_free(&new_comm);
        *comm_topo = MPI_COMM_NULL;
    }

    return OMPI_SUCCESS;
}
152
/* OBJ constructor: start a graph-topology object out empty, so the
   destructor can safely free whatever was (or was not) allocated. */
static void mca_topo_base_comm_graph_2_2_0_construct(mca_topo_base_comm_graph_2_2_0_t *graph)
{
    graph->index  = NULL;
    graph->edges  = NULL;
    graph->nnodes = 0;
}
158
/* OBJ destructor: release the arrays owned by a graph-topology object.
   free(NULL) is a no-op per the C standard, so no guards are needed;
   the members are reset to NULL to defend against any double-destruct
   or stale access through a lingering reference. */
static void mca_topo_base_comm_graph_2_2_0_destruct(mca_topo_base_comm_graph_2_2_0_t * graph) {
    free(graph->index);
    graph->index = NULL;
    free(graph->edges);
    graph->edges = NULL;
}
167
/* Register the class with the OPAL object system so that OBJ_NEW and
   OBJ_RELEASE invoke the construct/destruct hooks defined above. */
OBJ_CLASS_INSTANCE(mca_topo_base_comm_graph_2_2_0_t, opal_object_t,
                   mca_topo_base_comm_graph_2_2_0_construct,
                   mca_topo_base_comm_graph_2_2_0_destruct);
171