
Searched refs: send_cq (results 1 – 25 of 64), sorted by relevance


/linux/drivers/infiniband/core/
uverbs_std_types_qp.c
95 struct ib_cq *send_cq = NULL; in UVERBS_HANDLER() local
168 send_cq = uverbs_attr_get_obj(attrs, in UVERBS_HANDLER()
170 if (IS_ERR(send_cq)) in UVERBS_HANDLER()
171 return PTR_ERR(send_cq); in UVERBS_HANDLER()
175 send_cq = uverbs_attr_get_obj(attrs, in UVERBS_HANDLER()
177 if (IS_ERR(send_cq)) in UVERBS_HANDLER()
178 return PTR_ERR(send_cq); in UVERBS_HANDLER()
234 attr.send_cq = send_cq; in UVERBS_HANDLER()
verbs.c
1180 qp->send_cq = qp->recv_cq = NULL; in create_xrc_qp_user()
1232 qp->send_cq = attr->send_cq; in create_qp()
1246 qp->send_cq = attr->send_cq; in create_qp()
1306 if (qp->send_cq) in ib_qp_usecnt_inc()
1307 atomic_inc(&qp->send_cq->usecnt); in ib_qp_usecnt_inc()
1325 if (qp->send_cq) in ib_qp_usecnt_dec()
1326 atomic_dec(&qp->send_cq->usecnt); in ib_qp_usecnt_dec()
2819 struct ib_cq *cq = qp->send_cq; in __ib_drain_sq()
2912 trace_cq_drain_complete(qp->send_cq); in ib_drain_sq()
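Note: the core verbs.c hits above are where a QP picks up its send CQ from struct ib_qp_init_attr and where the send queue is drained before teardown. A minimal sketch of that usage from a kernel ULP's point of view, with illustrative names (example_*) and queue sizes that are assumptions, not taken from the tree:

#include <rdma/ib_verbs.h>

/* Hypothetical helper: wire a send/recv CQ pair into the QP init attributes. */
static struct ib_qp *example_create_qp(struct ib_pd *pd, struct ib_cq *send_cq,
                                       struct ib_cq *recv_cq)
{
        struct ib_qp_init_attr init_attr = {
                .send_cq     = send_cq,        /* completions for posted sends */
                .recv_cq     = recv_cq,        /* may be the same CQ as send_cq */
                .qp_type     = IB_QPT_RC,
                .sq_sig_type = IB_SIGNAL_REQ_WR,
                .cap = {
                        .max_send_wr  = 16,
                        .max_recv_wr  = 16,
                        .max_send_sge = 1,
                        .max_recv_sge = 1,
                },
        };

        return ib_create_qp(pd, &init_attr);   /* ERR_PTR() on failure */
}

/* Flush outstanding sends to qp->send_cq before tearing the QP down. */
static void example_destroy_qp(struct ib_qp *qp)
{
        ib_drain_sq(qp);
        ib_destroy_qp(qp);
}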
/linux/drivers/infiniband/hw/hns/
hns_roce_qp.c
266 hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL; in add_qp_to_list()
1458 __acquire(&send_cq->lock); in hns_roce_lock_cqs()
1461 spin_lock_irq(&send_cq->lock); in hns_roce_lock_cqs()
1465 __acquire(&send_cq->lock); in hns_roce_lock_cqs()
1466 } else if (send_cq == recv_cq) { in hns_roce_lock_cqs()
1467 spin_lock_irq(&send_cq->lock); in hns_roce_lock_cqs()
1470 spin_lock_irq(&send_cq->lock); in hns_roce_lock_cqs()
1484 __release(&send_cq->lock); in hns_roce_unlock_cqs()
1487 spin_unlock(&send_cq->lock); in hns_roce_unlock_cqs()
1489 __release(&send_cq->lock); in hns_roce_unlock_cqs()
[all …]
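Note: hns_roce_lock_cqs()/hns_roce_unlock_cqs() above (and the mthca and mlx4 equivalents further down) follow the same pattern: take a single lock when the QP uses one CQ for both queues, otherwise always acquire the two CQ locks in a fixed order so that concurrent QP teardown cannot deadlock. A minimal sketch of that pattern with illustrative struct and function names, assuming ordering by CQ number:

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_cq {
        spinlock_t lock;
        u32 cqn;        /* hardware CQ number, used only for lock ordering */
};

static void example_lock_cqs(struct example_cq *send_cq,
                             struct example_cq *recv_cq)
        __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
        if (send_cq == recv_cq) {
                spin_lock_irq(&send_cq->lock);
                __acquire(&recv_cq->lock);      /* sparse annotation only */
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_lock_irq(&send_cq->lock);
                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock_irq(&recv_cq->lock);
                spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
        }
}

static void example_unlock_cqs(struct example_cq *send_cq,
                               struct example_cq *recv_cq)
        __releases(&send_cq->lock) __releases(&recv_cq->lock)
{
        if (send_cq == recv_cq) {
                __release(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_unlock(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else {
                spin_unlock(&send_cq->lock);
                spin_unlock_irq(&recv_cq->lock);
        }
}

The extra __acquire()/__release() branches in the hns hits appear to cover the additional case where one of the CQs is NULL (e.g. XRC QPs); the sketch above omits that.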
hns_roce_hw_v2.c
2638 free_mr->rsv_qp[i]->ibqp.send_cq = cq; in free_mr_init_qp()
2696 qp_init_attr.send_cq = cq; in free_mr_alloc_res()
5197 if (ibqp->send_cq) in clear_qp()
5198 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq), in clear_qp()
5500 qp_init_attr->send_cq = ibqp->send_cq; in hns_roce_v2_query_qp()
5524 struct hns_roce_cq *send_cq, *recv_cq; in hns_roce_v2_destroy_qp_common() local
5538 send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL; in hns_roce_v2_destroy_qp_common()
5542 hns_roce_lock_cqs(send_cq, recv_cq); in hns_roce_v2_destroy_qp_common()
5551 if (send_cq && send_cq != recv_cq) in hns_roce_v2_destroy_qp_common()
5552 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL); in hns_roce_v2_destroy_qp_common()
[all …]
/linux/drivers/infiniband/hw/mana/
qp.c
270 struct mana_ib_cq *send_cq = in mana_ib_create_qp_raw() local
271 container_of(attr->send_cq, struct mana_ib_cq, ibcq); in mana_ib_create_qp_raw()
340 cq_spec.gdma_region = send_cq->queue.gdma_region; in mana_ib_create_qp_raw()
341 cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE; in mana_ib_create_qp_raw()
343 eq_vec = send_cq->comp_vector; in mana_ib_create_qp_raw()
358 send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION; in mana_ib_create_qp_raw()
361 send_cq->queue.id = cq_spec.queue_index; in mana_ib_create_qp_raw()
364 err = mana_ib_install_cq_cb(mdev, send_cq); in mana_ib_create_qp_raw()
370 qp->qp_handle, qp->raw_sq.id, send_cq->queue.id); in mana_ib_create_qp_raw()
373 resp.cqid = send_cq->queue.id; in mana_ib_create_qp_raw()
[all …]
/linux/drivers/infiniband/ulp/ipoib/
ipoib_verbs.c
187 priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL, in ipoib_transport_dev_init()
189 if (IS_ERR(priv->send_cq)) { in ipoib_transport_dev_init()
197 init_attr.send_cq = priv->send_cq; in ipoib_transport_dev_init()
218 if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP)) in ipoib_transport_dev_init()
244 ib_destroy_cq(priv->send_cq); in ipoib_transport_dev_init()
266 ib_destroy_cq(priv->send_cq); in ipoib_transport_dev_cleanup()
ipoib_ib.c
439 n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc); in poll_tx()
507 n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc); in ipoib_tx_poll()
519 if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP | in ipoib_tx_poll()
673 if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP | in ipoib_send()
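Note: the ipoib_ib.c hits show the usual poll/re-arm loop for a send CQ: reap completed send WRs with ib_poll_cq(), then re-arm with ib_req_notify_cq(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) and poll again if completions slipped in before the re-arm took effect. A condensed sketch of that loop; the wc array and batch size are supplied by the caller (as IPoIB does with priv->send_wc), and the function name is illustrative:

#include <rdma/ib_verbs.h>

static void example_reap_send_cq(struct ib_cq *send_cq, struct ib_wc *wc,
                                 int batch)
{
        int n, i;

again:
        while ((n = ib_poll_cq(send_cq, batch, wc)) > 0) {
                for (i = 0; i < n; i++) {
                        if (wc[i].status != IB_WC_SUCCESS)
                                pr_warn("send completion error %d\n",
                                        wc[i].status);
                        /* release the skb/buffer tracked via wc[i].wr_id here */
                }
        }

        /* Re-arm; a positive return means completions were missed. */
        if (ib_req_notify_cq(send_cq, IB_CQ_NEXT_COMP |
                             IB_CQ_REPORT_MISSED_EVENTS) > 0)
                goto again;
}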
/linux/drivers/infiniband/hw/mthca/
mthca_qp.c
1164 struct mthca_cq *send_cq, in mthca_alloc_qp_common() argument
1293 struct mthca_cq *send_cq, in mthca_alloc_qp() argument
1339 if (send_cq == recv_cq) { in mthca_lock_cqs()
1340 spin_lock_irq(&send_cq->lock); in mthca_lock_cqs()
1343 spin_lock_irq(&send_cq->lock); in mthca_lock_cqs()
1354 if (send_cq == recv_cq) { in mthca_unlock_cqs()
1361 spin_unlock(&send_cq->lock); in mthca_unlock_cqs()
1368 struct mthca_cq *send_cq, in mthca_alloc_sqp() argument
1448 struct mthca_cq *send_cq; in mthca_free_qp() local
1451 send_cq = to_mcq(qp->ibqp.send_cq); in mthca_free_qp()
[all …]
mthca_dev.h
535 struct mthca_cq *send_cq,
544 struct mthca_cq *send_cq,
/linux/drivers/infiniband/hw/mlx5/
qp.c
1316 send_cq ? get_ts_format(dev, send_cq, fr_sup, rt_sup) : in get_qp_ts_format()
2215 if (send_cq) in create_dci()
2418 if (send_cq) in create_user_qp()
2551 if (send_cq) in create_kernel_qp()
2568 if (send_cq) { in mlx5_ib_lock_cqs()
2598 if (send_cq) { in mlx5_ib_unlock_cqs()
2629 *send_cq = NULL; in get_cqs()
2693 get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq, in destroy_qp_common()
2700 if (send_cq) in destroy_qp_common()
4260 if (send_cq) in __mlx5_ib_modify_qp()
[all …]
gsi.c
50 struct ib_cq *gsi_cq = mqp->ibqp.send_cq; in generate_completions()
141 hw_init_attr.send_cq = gsi->cq; in mlx5_ib_create_gsi()
205 .send_cq = gsi->cq, in create_gsi_ud_qp()
mem.c
244 qp_init_attr.send_cq = cq; in mlx5_ib_test_wc()
/linux/include/rdma/
rdmavt_qp.h
806 struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq); in rvt_send_cq()
921 static inline u32 ib_cq_tail(struct ib_cq *send_cq) in ib_cq_tail() argument
923 struct rvt_cq *cq = ibcq_to_rvtcq(send_cq); in ib_cq_tail()
925 return ibcq_to_rvtcq(send_cq)->ip ? in ib_cq_tail()
927 ibcq_to_rvtcq(send_cq)->kqueue->tail; in ib_cq_tail()
937 static inline u32 ib_cq_head(struct ib_cq *send_cq) in ib_cq_head() argument
939 struct rvt_cq *cq = ibcq_to_rvtcq(send_cq); in ib_cq_head()
941 return ibcq_to_rvtcq(send_cq)->ip ? in ib_cq_head()
943 ibcq_to_rvtcq(send_cq)->kqueue->head; in ib_cq_head()
/linux/drivers/infiniband/hw/mlx4/
qp.c
1305 if (send_cq == recv_cq) { in mlx4_ib_lock_cqs()
1306 spin_lock(&send_cq->lock); in mlx4_ib_lock_cqs()
1309 spin_lock(&send_cq->lock); in mlx4_ib_lock_cqs()
1320 if (send_cq == recv_cq) { in mlx4_ib_unlock_cqs()
1356 *recv_cq = *send_cq; in get_cqs()
1359 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
1360 *recv_cq = *send_cq; in get_cqs()
1365 *send_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.send_cq) : in get_cqs()
1447 if (send_cq != recv_cq) in destroy_qp_common()
2359 recv_cq = send_cq; in __mlx4_ib_modify_qp()
[all …]
/linux/Documentation/translations/zh_CN/infiniband/
tag_matching.rst
39 1. The Eager protocol -- when the sender has finished processing the send, the complete message has already been sent. The send_cq will receive
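Note: the tag-matching text describes the eager path: the whole message goes out with the send itself, and the sender learns it can reuse the buffer from a completion on the send_cq. A hypothetical sender-side sketch requesting exactly that completion; the function name and the wr_cookie parameter are illustrative, not from the document:

#include <rdma/ib_verbs.h>

static int example_post_eager_send(struct ib_qp *qp, u64 wr_cookie,
                                   u64 dma_addr, u32 len, u32 lkey)
{
        struct ib_sge sge = {
                .addr   = dma_addr,
                .length = len,
                .lkey   = lkey,
        };
        struct ib_send_wr wr = {
                .wr_id      = wr_cookie,        /* shows up in the send_cq CQE */
                .sg_list    = &sge,
                .num_sge    = 1,
                .opcode     = IB_WR_SEND,
                .send_flags = IB_SEND_SIGNALED, /* ask for a CQE on send_cq */
        };

        return ib_post_send(qp, &wr, NULL);
}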
/linux/net/sunrpc/xprtrdma/
verbs.c
345 if (ep->re_attr.send_cq) in rpcrdma_ep_destroy()
346 ib_free_cq(ep->re_attr.send_cq); in rpcrdma_ep_destroy()
347 ep->re_attr.send_cq = NULL; in rpcrdma_ep_destroy()
414 ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt, in rpcrdma_ep_create()
417 if (IS_ERR(ep->re_attr.send_cq)) { in rpcrdma_ep_create()
418 rc = PTR_ERR(ep->re_attr.send_cq); in rpcrdma_ep_create()
419 ep->re_attr.send_cq = NULL; in rpcrdma_ep_create()
617 sc->sc_cid.ci_queue_id = ep->re_attr.send_cq->res.id; in rpcrdma_sendctx_create()
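Note: the xprtrdma hits show the lifecycle of a send CQ kept in the cached ib_qp_init_attr: allocate with ib_alloc_cq_any(), park it in re_attr.send_cq, and free it with ib_free_cq() on endpoint teardown, clearing the pointer so the destroy path is idempotent. A minimal sketch of that pattern; the names and the IB_POLL_WORKQUEUE context are assumptions for illustration:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static int example_setup_send_cq(struct ib_device *device, void *ep_context,
                                 struct ib_qp_init_attr *attr,
                                 unsigned int send_depth)
{
        struct ib_cq *cq;

        cq = ib_alloc_cq_any(device, ep_context, send_depth,
                             IB_POLL_WORKQUEUE);
        if (IS_ERR(cq))
                return PTR_ERR(cq);

        attr->send_cq = cq;
        return 0;
}

static void example_teardown_send_cq(struct ib_qp_init_attr *attr)
{
        if (attr->send_cq)
                ib_free_cq(attr->send_cq);
        attr->send_cq = NULL;
}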
/linux/fs/smb/server/
transport_rdma.c
104 struct ib_cq *send_cq; member
446 if (t->send_cq) in free_transport()
447 ib_free_cq(t->send_cq); in free_transport()
1884 t->send_cq = ib_alloc_cq(t->cm_id->device, t, in smb_direct_create_qpair()
1887 if (IS_ERR(t->send_cq)) { in smb_direct_create_qpair()
1889 ret = PTR_ERR(t->send_cq); in smb_direct_create_qpair()
1890 t->send_cq = NULL; in smb_direct_create_qpair()
1909 qp_attr.send_cq = t->send_cq; in smb_direct_create_qpair()
1944 if (t->send_cq) { in smb_direct_create_qpair()
1945 ib_destroy_cq(t->send_cq); in smb_direct_create_qpair()
[all …]
/linux/drivers/net/ethernet/ibm/ehea/
ehea_main.c
804 struct ehea_cq *send_cq = pr->send_cq; in ehea_proc_cqes() local
813 cqe = ehea_poll_cq(send_cq); in ehea_proc_cqes()
815 ehea_inc_cq(send_cq); in ehea_proc_cqes()
855 cqe = ehea_poll_cq(send_cq); in ehea_proc_cqes()
893 ehea_reset_cq_ep(pr->send_cq); in ehea_poll()
895 ehea_reset_cq_n1(pr->send_cq); in ehea_poll()
898 cqe_skb = ehea_poll_cq(pr->send_cq); in ehea_poll()
1480 if (!pr->send_cq) { in ehea_init_port_res()
1487 pr->send_cq->attr.act_nr_of_cqes, in ehea_init_port_res()
1562 ehea_destroy_cq(pr->send_cq); in ehea_init_port_res()
[all …]
ehea.h
351 struct ehea_cq *send_cq; member
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_qp.c
58 static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq, in get_cqs() argument
61 *send_cq = to_vcq(qp->ibqp.send_cq); in get_cqs()
359 cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle; in pvrdma_create_qp()
1034 init_attr->send_cq = qp->ibqp.send_cq; in pvrdma_query_qp()
/linux/drivers/infiniband/sw/rxe/
rxe_qp.c
75 if (!init->recv_cq || !init->send_cq) { in rxe_qp_chk_init()
356 struct rxe_cq *scq = to_rcq(init->send_cq); in rxe_qp_from_init()
417 init->send_cq = qp->ibqp.send_cq; in rxe_qp_to_init()
/linux/fs/smb/client/
smbdirect.c
1357 ib_free_cq(info->send_cq); in smbd_destroy()
1546 info->send_cq = NULL; in _smbd_get_connection()
1548 info->send_cq = in _smbd_get_connection()
1551 if (IS_ERR(info->send_cq)) { in _smbd_get_connection()
1552 info->send_cq = NULL; in _smbd_get_connection()
1574 qp_attr.send_cq = info->send_cq; in _smbd_get_connection()
1691 if (info->send_cq) in _smbd_get_connection()
1692 ib_free_cq(info->send_cq); in _smbd_get_connection()
smbdirect.h
59 struct ib_cq *send_cq, *recv_cq; member
/linux/drivers/infiniband/ulp/srp/
ib_srp.h
157 struct ib_cq *send_cq; member
ib_srp.c
516 ib_process_cq_direct(ch->send_cq, -1); in srp_destroy_qp()
529 struct ib_cq *recv_cq, *send_cq; in srp_create_ch_ib() local
549 if (IS_ERR(send_cq)) { in srp_create_ch_ib()
550 ret = PTR_ERR(send_cq); in srp_create_ch_ib()
561 init_attr->send_cq = send_cq; in srp_create_ch_ib()
599 if (ch->send_cq) in srp_create_ch_ib()
600 ib_free_cq(ch->send_cq); in srp_create_ch_ib()
604 ch->send_cq = send_cq; in srp_create_ch_ib()
622 ib_free_cq(send_cq); in srp_create_ch_ib()
667 ib_free_cq(ch->send_cq); in srp_free_ch_ib()
[all …]
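Note: the ib_srp.c hits combine direct polling with teardown: SRP polls its send CQ directly, so srp_destroy_qp() reaps leftover send completions itself with ib_process_cq_direct() before the QP and CQs go away. A rough sketch of that teardown order, with illustrative names, the locking omitted, and the assumption that the send CQ was allocated with IB_POLL_DIRECT:

#include <rdma/ib_verbs.h>

static void example_destroy_qp_and_cqs(struct ib_qp *qp,
                                       struct ib_cq *send_cq,
                                       struct ib_cq *recv_cq)
{
        ib_process_cq_direct(send_cq, -1);      /* reap remaining send CQEs */
        ib_drain_qp(qp);                        /* flush both SQ and RQ */
        ib_destroy_qp(qp);
        ib_free_cq(send_cq);
        ib_free_cq(recv_cq);
}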
