1 /**
2 * Copyright (C) Mellanox Technologies Ltd. 2001-2014.  ALL RIGHTS RESERVED.
3 *
4 * See file LICENSE for terms.
5 */
6 
7 #ifdef HAVE_CONFIG_H
8 #  include "config.h"
9 #endif
10 
11 #include "rc_verbs.h"
12 #include "rc_verbs_impl.h"
13 
14 #include <uct/api/uct.h>
15 #include <uct/ib/mlx5/exp/ib_exp.h>
16 #include <uct/ib/rc/base/rc_iface.h>
17 #include <uct/ib/base/ib_device.h>
18 #include <uct/ib/base/ib_log.h>
19 #include <uct/base/uct_md.h>
20 #include <ucs/arch/bitops.h>
21 #include <ucs/arch/cpu.h>
22 #include <ucs/debug/log.h>
23 #include <string.h>
24 
25 static uct_rc_iface_ops_t uct_rc_verbs_iface_ops;
26 
/* Configuration table for the RC verbs transport; extends the generic RC
 * interface options with verbs-specific tunables. */
static ucs_config_field_t uct_rc_verbs_iface_config_table[] = {
  /* Inherit all generic RC interface options (no extra prefix) */
  {"RC_", "", NULL,
   ucs_offsetof(uct_rc_verbs_iface_config_t, super),
   UCS_CONFIG_TYPE_TABLE(uct_rc_iface_config_table)},

  {"MAX_AM_HDR", "128",
   "Buffer size to reserve for active message headers. If set to 0, the transport will\n"
   "not support zero-copy active messages.",
   ucs_offsetof(uct_rc_verbs_iface_config_t, max_am_hdr), UCS_CONFIG_TYPE_MEMUNITS},

  {"TX_MAX_WR", "-1",
   "Limits the number of outstanding posted work requests. The actual limit is\n"
   "a minimum between this value and the TX queue length. -1 means no limit.",
   ucs_offsetof(uct_rc_verbs_iface_config_t, tx_max_wr), UCS_CONFIG_TYPE_UINT},

  {NULL} /* table terminator */
};
44 
/* Handle a send completion that finished with an error: let the endpoint try
 * to recover, then log the failed work completion (@arg is the ibv_wc). */
static void uct_rc_verbs_handle_failure(uct_ib_iface_t *ib_iface, void *arg,
                                        ucs_status_t status)
{
    uct_rc_iface_t *rc_iface = ucs_derived_of(ib_iface, uct_rc_iface_t);
    struct ibv_wc *wc        = arg;
    uct_rc_verbs_ep_t *ep;
    ucs_log_level_t level;

    ep = ucs_derived_of(uct_rc_iface_lookup_ep(rc_iface, wc->qp_num),
                        uct_rc_verbs_ep_t);
    if (ep == NULL) {
        /* QP is no longer mapped to an endpoint - nothing to report */
        return;
    }

    /* Log as fatal unless the endpoint-level handler recovered the error */
    level = (uct_rc_verbs_ep_handle_failure(ep, status) == UCS_OK) ?
            rc_iface->super.super.config.failure_level : UCS_LOG_LEVEL_FATAL;

    ucs_log(level,
            "send completion with error: %s qpn 0x%x wrid 0x%lx vendor_err 0x%x",
            ibv_wc_status_str(wc->status), wc->qp_num, wc->wr_id, wc->vendor_err);
}
67 
/* Interface-ops hook: transition the given endpoint to the failed state by
 * replacing its operations with the failed-EP class implementation. */
static ucs_status_t uct_rc_verbs_ep_set_failed(uct_ib_iface_t *iface,
                                               uct_ep_h ep, ucs_status_t status)
{
    return uct_set_ep_failed(&UCS_CLASS_NAME(uct_rc_verbs_ep_t), ep,
                             &iface->super.super, status);
}
74 
uct_rc_verbs_wc_to_ucs_status(enum ibv_wc_status status)75 ucs_status_t uct_rc_verbs_wc_to_ucs_status(enum ibv_wc_status status)
76 {
77     switch (status)
78     {
79     case IBV_WC_SUCCESS:
80         return UCS_OK;
81     case IBV_WC_RETRY_EXC_ERR:
82     case IBV_WC_RNR_RETRY_EXC_ERR:
83         return UCS_ERR_ENDPOINT_TIMEOUT;
84     default:
85         return UCS_ERR_IO_ERROR;
86     }
87 }
88 
/* Poll the send CQ for up to tx_max_poll completions and process each one:
 * failed completions go to the failure handler; successful ones release TX
 * resources, dispatch pending operations, and complete send descriptors.
 * Returns the number of work completions polled. */
static UCS_F_ALWAYS_INLINE unsigned
uct_rc_verbs_iface_poll_tx(uct_rc_verbs_iface_t *iface)
{
    uct_rc_verbs_ep_t *ep;
    uint16_t count;
    int i;
    unsigned num_wcs = iface->super.super.config.tx_max_poll;
    struct ibv_wc wc[num_wcs]; /* VLA sized by the configured poll batch */
    ucs_status_t status;

    UCT_RC_VERBS_IFACE_FOREACH_TXWQE(&iface->super, i, wc, num_wcs) {
        ep = ucs_derived_of(uct_rc_iface_lookup_ep(&iface->super, wc[i].qp_num),
                            uct_rc_verbs_ep_t);
        if (ucs_unlikely((wc[i].status != IBV_WC_SUCCESS) || (ep == NULL))) {
            /* Error completion, or the QP no longer maps to an endpoint -
             * delegate to the interface failure handler */
            status = uct_rc_verbs_wc_to_ucs_status(wc[i].status);
            iface->super.super.ops->handle_failure(&iface->super.super, &wc[i],
                                                   status);
            continue;
        }

        /* One CQE may account for several posted WQEs - presumably due to
         * CQ moderation; TODO confirm against txcq_get_comp_count */
        count = uct_rc_verbs_txcq_get_comp_count(&wc[i], &ep->super.txqp);
        ucs_trace_poll("rc_verbs iface %p tx_wc wrid 0x%lx ep %p qpn 0x%x count %d",
                       iface, wc[i].wr_id, ep, wc[i].qp_num, count);
        uct_rc_verbs_txqp_completed(&ep->super.txqp, &ep->txcnt, count);
        iface->super.tx.cq_available += count;

       /* process pending elements prior to CQ entries to avoid out-of-order
        * transmission in completion callbacks */
        ucs_arbiter_group_schedule(&iface->super.tx.arbiter,
                                   &ep->super.arb_group);
        ucs_arbiter_dispatch(&iface->super.tx.arbiter, 1,
                             uct_rc_ep_process_pending, NULL);

        /* Release send descriptors completed up to the current consumer index */
        uct_rc_txqp_completion_desc(&ep->super.txqp, ep->txcnt.ci);
    }

    return num_wcs;
}
127 
uct_rc_verbs_iface_progress(void * arg)128 static unsigned uct_rc_verbs_iface_progress(void *arg)
129 {
130     uct_rc_verbs_iface_t *iface = arg;
131     unsigned count;
132 
133     count = uct_rc_verbs_iface_poll_rx_common(iface);
134     if (count > 0) {
135         return count;
136     }
137 
138     return uct_rc_verbs_iface_poll_tx(iface);
139 }
140 
/* Pre-build the two reusable inline work requests:
 * - an inline SEND WR for active messages (2 SGEs: header + payload)
 * - a signaled inline RDMA_WRITE WR (single SGE)
 * All fields not listed are zero-initialized, matching a memset. */
static void uct_rc_verbs_iface_init_inl_wrs(uct_rc_verbs_iface_t *iface)
{
    iface->inl_am_wr = (struct ibv_send_wr){
        .sg_list    = iface->inl_sge,
        .num_sge    = 2,
        .opcode     = IBV_WR_SEND,
        .send_flags = IBV_SEND_INLINE
    };

    iface->inl_rwrite_wr = (struct ibv_send_wr){
        .sg_list    = iface->inl_sge,
        .num_sge    = 1,
        .opcode     = IBV_WR_RDMA_WRITE,
        .send_flags = IBV_SEND_SIGNALED | IBV_SEND_INLINE
    };
}
155 
/* Query the capabilities and attributes of the RC verbs interface. */
static ucs_status_t uct_rc_verbs_iface_query(uct_iface_h tl_iface, uct_iface_attr_t *iface_attr)
{
    uct_rc_verbs_iface_t *iface = ucs_derived_of(tl_iface, uct_rc_verbs_iface_t);
    uct_ib_md_t *md             = uct_ib_iface_md(ucs_derived_of(iface, uct_ib_iface_t));
    uint8_t mr_id;
    ucs_status_t status;

    /* Fill in the generic RC attributes; one SGE is reserved for the AM
     * header (see the max_send_sge assertion in the iface constructor) */
    status = uct_rc_iface_query(&iface->super, iface_attr,
                                iface->config.max_inline,
                                iface->config.max_inline,
                                iface->config.short_desc_size,
                                iface->config.max_send_sge - 1,
                                sizeof(uct_rc_hdr_t),
                                iface->config.max_send_sge);
    if (status != UCS_OK) {
        return status;
    }

    iface_attr->latency.m += 1e-9;  /* 1 ns per each extra QP */
    iface_attr->overhead   = 75e-9; /* Software overhead */

    /* The EP address grows by one byte when the MD exposes an atomic MR id */
    iface_attr->ep_addr_len = sizeof(uct_rc_verbs_ep_address_t);
    if (md->ops->get_atomic_mr_id(md, &mr_id) == UCS_OK) {
        iface_attr->ep_addr_len += sizeof(mr_id);
    }

    return UCS_OK;
}
184 
uct_rc_verbs_iface_flush_mem_create(uct_rc_verbs_iface_t * iface)185 ucs_status_t uct_rc_verbs_iface_flush_mem_create(uct_rc_verbs_iface_t *iface)
186 {
187     uct_ib_md_t *md = uct_ib_iface_md(&iface->super.super);
188     ucs_status_t status;
189     struct ibv_mr *mr;
190     void *mem;
191 
192     if (iface->flush_mr != NULL) {
193         ucs_assert(iface->flush_mem != NULL);
194         return UCS_OK;
195     }
196 
197     /*
198      * Map a whole page for the remote side to issue a dummy RDMA_WRITE on it,
199      * to flush its outstanding operations. A whole page is used to prevent any
200      * other allocations from using same page, so it would be fork-safe.
201      */
202     mem = ucs_mmap(NULL, ucs_get_page_size(), PROT_READ|PROT_WRITE,
203                    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0, "flush_mem");
204     if (mem == MAP_FAILED) {
205         ucs_error("failed to allocate page for remote flush: %m");
206         status = UCS_ERR_NO_MEMORY;
207         goto err;
208     }
209 
210     status = uct_ib_reg_mr(md->pd, mem, ucs_get_page_size(),
211                            UCT_IB_MEM_ACCESS_FLAGS, &mr);
212     if (status != UCS_OK) {
213         goto err_munmap;
214     }
215 
216     iface->flush_mem = mem;
217     iface->flush_mr  = mr;
218     return UCS_OK;
219 
220 err_munmap:
221     ucs_munmap(mem, ucs_get_page_size());
222 err:
223     return status;
224 }
225 
226 static ucs_status_t
uct_rc_iface_verbs_init_rx(uct_rc_iface_t * rc_iface,const uct_rc_iface_common_config_t * config)227 uct_rc_iface_verbs_init_rx(uct_rc_iface_t *rc_iface,
228                            const uct_rc_iface_common_config_t *config)
229 {
230     uct_rc_verbs_iface_t *iface = ucs_derived_of(rc_iface, uct_rc_verbs_iface_t);
231 
232     return uct_rc_iface_init_rx(rc_iface, config, &iface->srq);
233 }
234 
/* cleanup_rx hook of uct_rc_iface_ops_t: destroy the SRQ created in init_rx. */
void uct_rc_iface_verbs_cleanup_rx(uct_rc_iface_t *rc_iface)
{
    uct_rc_verbs_iface_t *verbs_iface = ucs_derived_of(rc_iface,
                                                       uct_rc_verbs_iface_t);

    /* TODO flush RX buffers */
    uct_ib_destroy_srq(verbs_iface->srq);
}
242 
/* Interface constructor: initializes the verbs-specific RC interface state on
 * top of the generic uct_rc_iface_t - TX limits, fence mode, inline WRs, the
 * short-descriptor mempool, FC thresholds - and discovers max_inline /
 * max_send_sge by creating (and immediately destroying) a dummy QP. */
static UCS_CLASS_INIT_FUNC(uct_rc_verbs_iface_t, uct_md_h tl_md,
                           uct_worker_h worker, const uct_iface_params_t *params,
                           const uct_iface_config_t *tl_config)
{
    uct_rc_verbs_iface_config_t *config =
                    ucs_derived_of(tl_config, uct_rc_verbs_iface_config_t);
    ucs_status_t status;
    uct_ib_iface_init_attr_t init_attr = {};
    uct_ib_qp_attr_t attr = {};
    struct ibv_qp *qp;
    uct_rc_hdr_t *hdr;

    init_attr.fc_req_size            = sizeof(uct_rc_fc_request_t);
    init_attr.rx_hdr_len             = sizeof(uct_rc_hdr_t);
    init_attr.qp_type                = IBV_QPT_RC;
    init_attr.cq_len[UCT_IB_DIR_RX]  = config->super.super.super.rx.queue_len;
    init_attr.cq_len[UCT_IB_DIR_TX]  = config->super.tx_cq_len;
    init_attr.seg_size               = config->super.super.super.seg_size;

    UCS_CLASS_CALL_SUPER_INIT(uct_rc_iface_t, &uct_rc_verbs_iface_ops, tl_md,
                              worker, params, &config->super.super, &init_attr);

    /* Cap outstanding WRs by the TX queue length; keep CQ moderation at no
     * more than a quarter of that limit */
    self->config.tx_max_wr           = ucs_min(config->tx_max_wr,
                                               self->super.config.tx_qp_len);
    self->super.config.tx_moderation = ucs_min(config->super.tx_cq_moderation,
                                               self->config.tx_max_wr / 4);
    self->super.config.fence_mode    = (uct_rc_fence_mode_t)config->super.super.fence_mode;
    self->super.progress             = uct_rc_verbs_iface_progress;
    self->flush_mem                  = NULL;
    self->flush_mr                   = NULL;

    /* AUTO resolves to WEAK for this transport; only NONE/WEAK/AUTO are valid */
    if ((config->super.super.fence_mode == UCT_RC_FENCE_MODE_WEAK) ||
        (config->super.super.fence_mode == UCT_RC_FENCE_MODE_AUTO)) {
        self->super.config.fence_mode = UCT_RC_FENCE_MODE_WEAK;
    } else if (config->super.super.fence_mode == UCT_RC_FENCE_MODE_NONE) {
        self->super.config.fence_mode = UCT_RC_FENCE_MODE_NONE;
    } else {
        ucs_error("incorrect fence value: %d", self->super.config.fence_mode);
        status = UCS_ERR_INVALID_PARAM;
        goto err;
    }

    memset(self->inl_sge, 0, sizeof(self->inl_sge));
    uct_rc_am_hdr_fill(&self->am_inl_hdr.rc_hdr, 0);

    /* Configuration: the short descriptor must hold at least an RC header,
     * the configured AM header reservation, and the largest atomic operand */
    self->config.short_desc_size = ucs_max(sizeof(uct_rc_hdr_t),
                                           config->max_am_hdr);
    self->config.short_desc_size = ucs_max(UCT_IB_MAX_ATOMIC_SIZE,
                                           self->config.short_desc_size);

    /* Create AM headers and Atomic mempool */
    status = uct_iface_mpool_init(&self->super.super.super,
                                  &self->short_desc_mp,
                                  sizeof(uct_rc_iface_send_desc_t) +
                                      self->config.short_desc_size,
                                  sizeof(uct_rc_iface_send_desc_t),
                                  UCS_SYS_CACHE_LINE_SIZE,
                                  &config->super.super.super.tx.mp,
                                  self->super.config.tx_qp_len,
                                  uct_rc_iface_send_desc_init,
                                  "rc_verbs_short_desc");
    if (status != UCS_OK) {
        goto err;
    }

    uct_rc_verbs_iface_init_inl_wrs(self);

    /* Check FC parameters correctness */
    status = uct_rc_init_fc_thresh(&config->super, &self->super);
    if (status != UCS_OK) {
        goto err_common_cleanup;
    }

    /* Create a dummy QP in order to find out max_inline */
    uct_ib_exp_qp_fill_attr(&self->super.super, &attr);
    status = uct_rc_iface_qp_create(&self->super, &qp, &attr,
                                    self->super.config.tx_qp_len,
                                    self->srq);
    if (status != UCS_OK) {
        goto err_common_cleanup;
    }
    uct_ib_destroy_qp(qp);

    self->config.max_inline   = attr.cap.max_inline_data;
    self->config.max_send_sge = ucs_min(UCT_IB_MAX_IOV, attr.cap.max_send_sge);
    ucs_assertv_always(self->config.max_send_sge > 1, /* need 1 iov for am header*/
                       "max_send_sge %zu", self->config.max_send_sge);

    /* If the FC pure-grant header cannot go inline, pre-build it once in a
     * dedicated descriptor taken from the short-descriptor pool */
    if (self->config.max_inline < sizeof(*hdr)) {
        self->fc_desc = ucs_mpool_get(&self->short_desc_mp);
        ucs_assert_always(self->fc_desc != NULL);
        hdr        = (uct_rc_hdr_t*)(self->fc_desc + 1);
        hdr->am_id = UCT_RC_EP_FC_PURE_GRANT;
    } else {
        self->fc_desc = NULL;
    }

    return UCS_OK;

err_common_cleanup:
    ucs_mpool_cleanup(&self->short_desc_mp, 1);
err:
    return status;
}
348 
/* Convert up to 'max' units of the one-time SRQ quota into posted receive
 * buffers. Returns UCS_ERR_NO_MEMORY if posting fails while slots remain. */
ucs_status_t uct_rc_verbs_iface_common_prepost_recvs(uct_rc_verbs_iface_t *iface,
                                                     unsigned max)
{
    unsigned count;

    /* Move 'count' entries from the quota into the available pool */
    count = ucs_min(max, iface->super.rx.srq.quota);
    iface->super.rx.srq.available += count;
    iface->super.rx.srq.quota     -= count;
    while (iface->super.rx.srq.available > 0) {
        /* NOTE(review): loop termination relies on post_recv_common
         * decrementing 'available' as it posts (as post_recv_always does);
         * a zero return means no buffer could be posted */
        if (uct_rc_verbs_iface_post_recv_common(iface, 1) == 0) {
            ucs_error("failed to post receives");
            return UCS_ERR_NO_MEMORY;
        }
    }
    return UCS_OK;
}
365 
/* Enable progress on the interface. When receive progress is requested, the
 * SRQ is first topped up with receive buffers from the remaining quota. */
void uct_rc_verbs_iface_common_progress_enable(uct_iface_h tl_iface, unsigned flags)
{
    uct_rc_verbs_iface_t *verbs_iface = ucs_derived_of(tl_iface,
                                                       uct_rc_verbs_iface_t);

    if (flags & UCT_PROGRESS_RECV) {
        /* ignore return value from prepost_recv, since it's not really possible
         * to handle here, and some receives were already pre-posted during iface
         * creation anyway.
         */
        (void)uct_rc_verbs_iface_common_prepost_recvs(verbs_iface, UINT_MAX);
    }

    uct_base_iface_progress_enable_cb(&verbs_iface->super.super.super,
                                      verbs_iface->super.progress, flags);
}
382 
/* Unconditionally post up to 'max' receive work requests to the SRQ.
 * Returns the number of buffers actually posted (0 if no RX buffers could be
 * prepared). Aborts the process if ibv_post_srq_recv() itself fails. */
unsigned uct_rc_verbs_iface_post_recv_always(uct_rc_verbs_iface_t *iface, unsigned max)
{
    struct ibv_recv_wr *bad_wr;
    uct_ib_recv_wr_t *wrs;
    unsigned count;
    int ret;

    /* Scratch WR array on the stack for this batch */
    wrs  = ucs_alloca(sizeof *wrs  * max);

    /* Fill WRs with buffers from the RX mempool; may prepare fewer than max */
    count = uct_ib_iface_prepare_rx_wrs(&iface->super.super, &iface->super.rx.mp,
                                        wrs, max);
    if (ucs_unlikely(count == 0)) {
        return 0;
    }

    ret = ibv_post_srq_recv(iface->srq, &wrs[0].ibwr, &bad_wr);
    if (ret != 0) {
        /* Posting to the SRQ should never fail with valid WRs */
        ucs_fatal("ibv_post_srq_recv() returned %d: %m", ret);
    }
    iface->super.rx.srq.available -= count;

    return count;
}
406 
/* Interface destructor: stop progress callbacks, then release the flush
 * MR/page, the FC pure-grant descriptor, and the short-descriptor mempool. */
static UCS_CLASS_CLEANUP_FUNC(uct_rc_verbs_iface_t)
{
    uct_base_iface_progress_disable(&self->super.super.super.super,
                                    UCT_PROGRESS_SEND | UCT_PROGRESS_RECV);

    if (self->flush_mr != NULL) {
        /* Deregister before unmapping the backing page */
        uct_ib_dereg_mr(self->flush_mr);
        ucs_assert(self->flush_mem != NULL);
        ucs_munmap(self->flush_mem, ucs_get_page_size());
    }
    if (self->fc_desc != NULL) {
        ucs_mpool_put(self->fc_desc);
    }
    ucs_mpool_cleanup(&self->short_desc_mp, 1);
}
422 
/* Instantiate the class (derived from uct_rc_iface_t) together with its
 * new/delete entry points used by iface open/close. */
UCS_CLASS_DEFINE(uct_rc_verbs_iface_t, uct_rc_iface_t);
static UCS_CLASS_DEFINE_NEW_FUNC(uct_rc_verbs_iface_t, uct_iface_t, uct_md_h,
                                 uct_worker_h, const uct_iface_params_t*,
                                 const uct_iface_config_t*);
static UCS_CLASS_DEFINE_DELETE_FUNC(uct_rc_verbs_iface_t, uct_iface_t);
428 
/* Operations table of the RC verbs transport: UCT endpoint/iface ops (inner),
 * IB-level ops (middle), and RC-level ops (outer). */
static uct_rc_iface_ops_t uct_rc_verbs_iface_ops = {
    {
    {
    /* Active messages */
    .ep_am_short              = uct_rc_verbs_ep_am_short,
    .ep_am_bcopy              = uct_rc_verbs_ep_am_bcopy,
    .ep_am_zcopy              = uct_rc_verbs_ep_am_zcopy,
    /* RMA */
    .ep_put_short             = uct_rc_verbs_ep_put_short,
    .ep_put_bcopy             = uct_rc_verbs_ep_put_bcopy,
    .ep_put_zcopy             = uct_rc_verbs_ep_put_zcopy,
    .ep_get_bcopy             = uct_rc_verbs_ep_get_bcopy,
    .ep_get_zcopy             = uct_rc_verbs_ep_get_zcopy,
    /* Atomics: only 64-bit operations are supported by this transport */
    .ep_atomic_cswap64        = uct_rc_verbs_ep_atomic_cswap64,
    .ep_atomic64_post         = uct_rc_verbs_ep_atomic64_post,
    .ep_atomic64_fetch        = uct_rc_verbs_ep_atomic64_fetch,
    .ep_atomic_cswap32        = (uct_ep_atomic_cswap32_func_t)ucs_empty_function_return_unsupported,
    .ep_atomic32_post         = (uct_ep_atomic32_post_func_t)ucs_empty_function_return_unsupported,
    .ep_atomic32_fetch        = (uct_ep_atomic32_fetch_func_t)ucs_empty_function_return_unsupported,
    /* Pending queue and flush/fence */
    .ep_pending_add           = uct_rc_ep_pending_add,
    .ep_pending_purge         = uct_rc_ep_pending_purge,
    .ep_flush                 = uct_rc_verbs_ep_flush,
    .ep_fence                 = uct_rc_verbs_ep_fence,
    /* Endpoint lifecycle and connection establishment */
    .ep_create                = UCS_CLASS_NEW_FUNC_NAME(uct_rc_verbs_ep_t),
    .ep_destroy               = UCS_CLASS_DELETE_FUNC_NAME(uct_rc_verbs_ep_t),
    .ep_get_address           = uct_rc_verbs_ep_get_address,
    .ep_connect_to_ep         = uct_rc_verbs_ep_connect_to_ep,
    /* Interface-level operations */
    .iface_flush              = uct_rc_iface_flush,
    .iface_fence              = uct_rc_iface_fence,
    .iface_progress_enable    = uct_rc_verbs_iface_common_progress_enable,
    .iface_progress_disable   = uct_base_iface_progress_disable,
    .iface_progress           = uct_rc_iface_do_progress,
    .iface_event_fd_get       = uct_ib_iface_event_fd_get,
    .iface_event_arm          = uct_rc_iface_event_arm,
    .iface_close              = UCS_CLASS_DELETE_FUNC_NAME(uct_rc_verbs_iface_t),
    .iface_query              = uct_rc_verbs_iface_query,
    .iface_get_address        = ucs_empty_function_return_success,
    .iface_get_device_address = uct_ib_iface_get_device_address,
    .iface_is_reachable       = uct_ib_iface_is_reachable,
    },
    /* IB-level hooks */
    .create_cq                = uct_ib_verbs_create_cq,
    .arm_cq                   = uct_ib_iface_arm_cq,
    .event_cq                 = (uct_ib_iface_event_cq_func_t)ucs_empty_function,
    .handle_failure           = uct_rc_verbs_handle_failure,
    .set_ep_failed            = uct_rc_verbs_ep_set_failed,
    },
    /* RC-level hooks */
    .init_rx                  = uct_rc_iface_verbs_init_rx,
    .cleanup_rx               = uct_rc_iface_verbs_cleanup_rx,
    .fc_ctrl                  = uct_rc_verbs_ep_fc_ctrl,
    .fc_handler               = uct_rc_iface_fc_handler
};
478 
479 static ucs_status_t
uct_rc_verbs_query_tl_devices(uct_md_h md,uct_tl_device_resource_t ** tl_devices_p,unsigned * num_tl_devices_p)480 uct_rc_verbs_query_tl_devices(uct_md_h md,
481                               uct_tl_device_resource_t **tl_devices_p,
482                               unsigned *num_tl_devices_p)
483 {
484     uct_ib_md_t *ib_md = ucs_derived_of(md, uct_ib_md_t);
485     int flags;
486 
487     flags = ib_md->config.eth_pause ? 0 : UCT_IB_DEVICE_FLAG_LINK_IB;
488     return uct_ib_device_query_ports(&ib_md->dev, flags, tl_devices_p,
489                                      num_tl_devices_p);
490 }
491 
/* Register the "rc_verbs" transport with the IB component, wiring the device
 * query function, iface class, env prefix, and configuration table. */
UCT_TL_DEFINE(&uct_ib_component, rc_verbs, uct_rc_verbs_query_tl_devices,
              uct_rc_verbs_iface_t, "RC_VERBS_", uct_rc_verbs_iface_config_table,
              uct_rc_verbs_iface_config_t);
495