/**
 * Copyright (C) Mellanox Technologies Ltd. 2001-2018. ALL RIGHTS RESERVED.
 *
 * See file LICENSE for terms.
 */
6 #ifdef HAVE_CONFIG_H
7 # include "config.h"
8 #endif
9
10 #ifdef HAVE_INFINIBAND_MLX5_HW_H
11
12 #include "ib_mlx5_hw.h"
13
14 #include <ucs/debug/log.h>
15 #include <ucs/debug/assert.h>
16 #include <ucs/arch/bitops.h>
17 #include <uct/ib/base/ib_verbs.h>
18 #include <infiniband/mlx5_hw.h>
19 #include <string.h>
20
21 /* Since this file intended to emulate DV using legacy mlx5_hw.h definitions
22 * we include DV declarations. */
23 #define UCT_IB_MLX5_H_
24 #include <uct/ib/mlx5/dv/ib_mlx5_dv.h>
25
/* Report a failure to extract mlx5 object information. The usual cause is an
 * installed libmlx5 whose internal layout differs from the headers UCX was
 * compiled against, hence the version hint in the message. */
static void UCS_F_MAYBE_UNUSED uct_ib_mlx5_obj_error(const char *obj_name)
{
    ucs_error("Failed to get mlx5 %s information. Please make sure the installed "
              "libmlx5 version matches the one UCX was compiled with (%s)",
              obj_name, UCT_IB_LIBMLX5_VER);
}
32
33 #if !HAVE_DECL_MLX5DV_INIT_OBJ
/**
 * Retrieve the mlx5 hardware layout of a verbs QP (doorbell record, SQ/RQ
 * buffers and BlueFlame register), emulating mlx5dv on legacy libmlx5.
 *
 * @param qp       Verbs QP to query.
 * @param qp_info  Filled with the direct-verbs style QP information.
 *
 * @return UCS_OK on success, UCS_ERR_NO_DEVICE if the information could not
 *         be extracted (or, on the legacy path, if the QP was already used).
 */
ucs_status_t uct_ib_mlx5_get_qp_info(struct ibv_qp *qp, uct_ib_mlx5dv_qp_t *qp_info)
{
#if HAVE_DECL_IBV_MLX5_EXP_GET_QP_INFO
    /* Preferred path: libmlx5 exposes an experimental query API */
    struct ibv_mlx5_qp_info ibv_qp_info;
    int ret;

    ret = ibv_mlx5_exp_get_qp_info(qp, &ibv_qp_info);
    if (ret != 0) {
        uct_ib_mlx5_obj_error("qp");
        return UCS_ERR_NO_DEVICE;
    }

    qp_info->dv.dbrec      = ibv_qp_info.dbrec;
    qp_info->dv.sq.buf     = ibv_qp_info.sq.buf;
    qp_info->dv.sq.wqe_cnt = ibv_qp_info.sq.wqe_cnt;
    qp_info->dv.sq.stride  = ibv_qp_info.sq.stride;
    qp_info->dv.rq.buf     = ibv_qp_info.rq.buf;
    qp_info->dv.rq.wqe_cnt = ibv_qp_info.rq.wqe_cnt;
    qp_info->dv.rq.stride  = ibv_qp_info.rq.stride;
    qp_info->dv.bf.reg     = ibv_qp_info.bf.reg;
    qp_info->dv.bf.size    = ibv_qp_info.bf.size;
#else
    /* Fallback: peek directly into libmlx5's internal struct mlx5_qp.
     * Only valid for a fresh QP on which nothing was posted yet. */
    struct mlx5_qp *mqp = ucs_container_of(qp, struct mlx5_qp, verbs_qp.qp);

    if ((mqp->sq.cur_post != 0) || (mqp->rq.head != 0)) {
        ucs_warn("cur_post=%d head=%d need_lock=%d", mqp->sq.cur_post,
                 mqp->rq.head, mqp->bf->need_lock);
        return UCS_ERR_NO_DEVICE;
    }

    qp_info->dv.qpn        = qp->qp_num;
    qp_info->dv.dbrec      = mqp->db;
    qp_info->dv.sq.buf     = mqp->buf.buf + mqp->sq.offset;
    qp_info->dv.sq.wqe_cnt = mqp->sq.wqe_cnt;
    qp_info->dv.sq.stride  = 1 << mqp->sq.wqe_shift; /* stride stored as shift */
    qp_info->dv.rq.buf     = mqp->buf.buf + mqp->rq.offset;
    qp_info->dv.rq.wqe_cnt = mqp->rq.wqe_cnt;
    qp_info->dv.rq.stride  = 1 << mqp->rq.wqe_shift;
    qp_info->dv.bf.reg     = mqp->bf->reg;

    /* uuarn 0 means the QP did not get a dedicated BlueFlame UAR */
    if (mqp->bf->uuarn > 0) {
        qp_info->dv.bf.size = mqp->bf->buf_size;
    } else {
        qp_info->dv.bf.size = 0; /* No BF */
    }
#endif
    return UCS_OK;
}
82
/**
 * Retrieve the mlx5 hardware layout of a verbs SRQ (WQE buffer, doorbell
 * record, stride and head/tail indices), emulating mlx5dv on legacy libmlx5.
 *
 * @param srq       Verbs SRQ to query.
 * @param srq_info  Filled with the direct-verbs style SRQ information.
 *
 * @return UCS_OK on success, UCS_ERR_NO_DEVICE if the information could not
 *         be extracted (or, on the legacy path, if the SRQ was already used).
 */
ucs_status_t uct_ib_mlx5_get_srq_info(struct ibv_srq *srq,
                                      uct_ib_mlx5dv_srq_t *srq_info)
{
#if HAVE_DECL_IBV_MLX5_EXP_GET_SRQ_INFO
    /* Preferred path: libmlx5 exposes an experimental query API */
    struct ibv_mlx5_srq_info ibv_srq_info;
    int ret;

    ret = ibv_mlx5_exp_get_srq_info(srq, &ibv_srq_info);
    if (ret != 0) {
        uct_ib_mlx5_obj_error("srq");
        return UCS_ERR_NO_DEVICE;
    }

    srq_info->dv.buf    = ibv_srq_info.buf;
    srq_info->dv.dbrec  = ibv_srq_info.dbrec;
    srq_info->dv.stride = ibv_srq_info.stride;
    srq_info->dv.head   = ibv_srq_info.head;
    srq_info->dv.tail   = ibv_srq_info.tail;
#else
    struct mlx5_srq *msrq;

    /* Legacy XRC SRQs are wrapped: unwrap to the real ibv_srq first */
    if (srq->handle == LEGACY_XRC_SRQ_HANDLE) {
        srq = (struct ibv_srq *)(((struct ibv_srq_legacy *)srq)->ibv_srq);
    }

    msrq = ucs_container_of(srq, struct mlx5_srq, vsrq.srq);

    /* Only a fresh SRQ (no WQEs ever posted) can be taken over */
    if (msrq->counter != 0) {
        ucs_error("SRQ counter is not 0 (%d)", msrq->counter);
        return UCS_ERR_NO_DEVICE;
    }

    srq_info->dv.buf    = msrq->buf.buf;
    srq_info->dv.dbrec  = msrq->db;
    srq_info->dv.stride = 1 << msrq->wqe_shift; /* stride stored as shift */
    srq_info->dv.head   = msrq->head;
    srq_info->dv.tail   = msrq->tail;
#endif
    return UCS_OK;
}
123
uct_ib_mlx5_get_cq(struct ibv_cq * cq,uct_ib_mlx5dv_cq_t * mlx5_cq)124 static ucs_status_t uct_ib_mlx5_get_cq(struct ibv_cq *cq, uct_ib_mlx5dv_cq_t *mlx5_cq)
125 {
126 #if HAVE_DECL_IBV_MLX5_EXP_GET_CQ_INFO
127 struct ibv_mlx5_cq_info ibv_cq_info;
128 int ret;
129
130 ret = ibv_mlx5_exp_get_cq_info(cq, &ibv_cq_info);
131 if (ret != 0) {
132 uct_ib_mlx5_obj_error("cq");
133 return UCS_ERR_NO_DEVICE;
134 }
135
136 mlx5_cq->dv.buf = ibv_cq_info.buf;
137 mlx5_cq->dv.cqe_cnt = ibv_cq_info.cqe_cnt;
138 mlx5_cq->dv.cqn = ibv_cq_info.cqn;
139 mlx5_cq->dv.cqe_size = ibv_cq_info.cqe_size;
140 #else
141 struct mlx5_cq *mcq = ucs_container_of(cq, struct mlx5_cq, ibv_cq);
142 int ret;
143
144 if (mcq->cons_index != 0) {
145 ucs_error("CQ consumer index is not 0 (%d)", mcq->cons_index);
146 return UCS_ERR_NO_DEVICE;
147 }
148
149 mlx5_cq->dv.buf = mcq->active_buf->buf;
150 mlx5_cq->dv.cqe_cnt = mcq->ibv_cq.cqe + 1;
151 mlx5_cq->dv.cqn = mcq->cqn;
152 mlx5_cq->dv.cqe_size = mcq->cqe_sz;
153 #endif
154 return UCS_OK;
155 }
156
/**
 * Emulation of mlx5dv_init_obj(): fill the mlx5 hardware information for every
 * object selected by @a obj_type (QP, CQ, SRQ and - with IBV_EXP_DM - device
 * memory), stopping at the first failure.
 *
 * @param obj       In/out object holding the verbs handles (dv.*.in) and the
 *                  DV-layout output structs (dv.*.out) to fill.
 * @param obj_type  Bitmask of MLX5DV_OBJ_* selecting which objects to query.
 *
 * @return UCS_OK on success, or the first error status returned by one of the
 *         per-object queries.
 */
ucs_status_t uct_ib_mlx5dv_init_obj(uct_ib_mlx5dv_t *obj, uint64_t obj_type)
{
    ucs_status_t ret = UCS_OK;

    /* Each dv.*.out points at the 'dv' member embedded in the corresponding
     * uct_ib_mlx5dv_*_t wrapper; ucs_container_of recovers the wrapper.
     * '!ret' keeps going only while all previous queries returned UCS_OK (0). */
    if (obj_type & MLX5DV_OBJ_QP) {
        ret = uct_ib_mlx5_get_qp_info(obj->dv.qp.in,
                ucs_container_of(obj->dv.qp.out, uct_ib_mlx5dv_qp_t, dv));
    }

    if (!ret && (obj_type & MLX5DV_OBJ_CQ)) {
        ret = uct_ib_mlx5_get_cq(obj->dv.cq.in,
                ucs_container_of(obj->dv.cq.out, uct_ib_mlx5dv_cq_t, dv));
    }

    if (!ret && (obj_type & MLX5DV_OBJ_SRQ)) {
        ret = uct_ib_mlx5_get_srq_info(obj->dv.srq.in,
                ucs_container_of(obj->dv.srq.out, uct_ib_mlx5dv_srq_t, dv));
    }

#ifdef HAVE_IBV_EXP_DM
    if (!ret && (obj_type & MLX5DV_OBJ_DM)) {
        ret = uct_ib_mlx5_get_dm_info(obj->dv_dm.in, obj->dv_dm.out);
    }
#endif

    return ret;
}
184 #endif
185
/* Set the CQ consumer index, either through the experimental libmlx5 API or
 * by writing libmlx5's internal struct mlx5_cq directly (legacy layout). */
void uct_ib_mlx5_update_cq_ci(struct ibv_cq *cq, unsigned cq_ci)
{
#if HAVE_DECL_IBV_MLX5_EXP_UPDATE_CQ_CI
    ibv_mlx5_exp_update_cq_ci(cq, cq_ci);
#else
    struct mlx5_cq *mcq = ucs_container_of(cq, struct mlx5_cq, ibv_cq);
    mcq->cons_index = cq_ci;
#endif
}
195
uct_ib_mlx5_get_cq_ci(struct ibv_cq * cq)196 unsigned uct_ib_mlx5_get_cq_ci(struct ibv_cq *cq)
197 {
198 struct mlx5_cq *mcq = ucs_container_of(cq, struct mlx5_cq, ibv_cq);
199 return mcq->cons_index;
200 }
201
202 #if !HAVE_DECL_MLX5DV_OBJ_AH
uct_ib_mlx5_get_av(struct ibv_ah * ah,struct mlx5_wqe_av * av)203 void uct_ib_mlx5_get_av(struct ibv_ah *ah, struct mlx5_wqe_av *av)
204 {
205 memcpy(av, &ucs_container_of(ah, struct mlx5_ah, ibv_ah)->av, sizeof(*av));
206 }
207 #endif
208
/**
 * Return the internal command QP that libmlx5 associates with an SRQ
 * (available only when struct mlx5_srq has a 'cmd_qp' member).
 *
 * @param srq  Verbs SRQ to inspect; legacy XRC wrappers are unwrapped first.
 *
 * @return Pointer to the command QP, or NULL if the SRQ was already used or
 *         the installed libmlx5 has no command QP in its SRQ layout.
 */
struct ibv_qp *uct_dv_get_cmd_qp(struct ibv_srq *srq)
{
#ifdef HAVE_STRUCT_MLX5_SRQ_CMD_QP
    struct mlx5_srq *msrq;

    /* Legacy XRC SRQs are wrapped: unwrap to the real ibv_srq first */
    if (srq->handle == LEGACY_XRC_SRQ_HANDLE) {
        srq = (struct ibv_srq *)(((struct ibv_srq_legacy *)srq)->ibv_srq);
    }

    msrq = ucs_container_of(srq, struct mlx5_srq, vsrq.srq);
    /* Only a fresh SRQ (no WQEs ever posted) can be taken over */
    if (msrq->counter != 0) {
        ucs_error("SRQ counter is not 0 (%d)", msrq->counter);
        return NULL;
    }

    return &msrq->cmd_qp->verbs_qp.qp;
#else
    return NULL;
#endif
}
229
/* Minimal local mirror of libmlx5's internal UAR descriptor, declared here
 * only so uct_dv_get_info_uar0() can reach the 'regs' pointer of UAR #0.
 * NOTE(review): assumed to match the installed rdma-core's layout (an enum
 * map type followed by the mapped registers pointer) - verify against the
 * libmlx5 version in use. */
struct mlx5_uar_data {
    enum { __DUMMY } map_type; /* placeholder for the real map-type enum */
    void *regs;                /* mapped device registers */
};
234
/* Return the mapped registers pointer of the first UAR in @a uar, which is
 * treated as an array of struct mlx5_uar_data (see mirror struct above).
 * Returns NULL when built without mlx5dv support. */
void *uct_dv_get_info_uar0(void *uar)
{
#if HAVE_DECL_MLX5DV_INIT_OBJ
    struct mlx5_uar_data *muar = uar;
    return muar[0].regs;
#else
    return NULL;
#endif
}
244
245 #endif
246