
Searched refs:cqe (Results 1 – 25 of 135) sorted by relevance


/freebsd/contrib/ofed/libmlx4/
cq.c
104 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibv_cq.cqe); in get_sw_cqe() local
105 struct mlx4_cqe *tcqe = cq->cqe_size == 64 ? cqe + 1 : cqe; in get_sw_cqe()
108 !!(n & (cq->ibv_cq.cqe + 1))) ? NULL : cqe; in get_sw_cqe()
122 htobe32(cqe->vlan_my_qpn), htobe32(cqe->wqe_index), in mlx4_handle_error_cqe()
209 if (!cqe) in mlx4_get_next_cqe()
213 ++cqe; in mlx4_get_next_cqe()
217 VALGRIND_MAKE_MEM_DEFINED(cqe, sizeof *cqe); in mlx4_get_next_cqe()
225 *pcqe = cqe; in mlx4_get_next_cqe()
255 cq->cqe = cqe; in mlx4_parse_cqe()
738 cqe = get_cqe(cq, prod_index & cq->ibv_cq.cqe); in __mlx4_cq_clean()
[all …]
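
Note: the get_sw_cqe() hit above is the core of mlx4 CQ polling. The ring size is a power of two, cq->ibv_cq.cqe stores that size minus one (so it doubles as an index mask), and the bit just above the mask gives the parity of the current lap around the ring; an entry belongs to software only when its owner bit matches that parity. A minimal standalone sketch of the same test, with an illustrative struct and the owner flag assumed to be the top bit of the opcode byte as in mlx4:

#include <stddef.h>
#include <stdint.h>

#define CQE_OWNER_MASK	0x80	/* assumed: owner flag in the top bit */

struct sw_cqe {
	uint8_t	owner_sr_opcode;	/* hardware toggles the owner bit each lap */
};

/*
 * Return the CQE at index n if software owns it, NULL otherwise.
 * mask is ring_size - 1, so (n & (mask + 1)) is the lap parity of n,
 * which must equal the owner bit the hardware wrote into the entry.
 */
static struct sw_cqe *
get_sw_cqe_sketch(struct sw_cqe *ring, uint32_t n, uint32_t mask)
{
	struct sw_cqe *cqe = &ring[n & mask];

	return ((!!(cqe->owner_sr_opcode & CQE_OWNER_MASK) ^
	    !!(n & (mask + 1))) ? NULL : cqe);
}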
verbs.c
460 if (cq_attr->cqe > 0x3fffff) { in create_cq()
497 cq_attr->cqe = align_queue_size(cq_attr->cqe + 1); in create_cq()
517 --cq_attr->cqe; in create_cq()
552 struct ibv_cq_init_attr_ex cq_attr = {.cqe = cqe, .channel = channel, in mlx4_create_cq()
567 struct ibv_cq_init_attr_ex cq_attr_c = {.cqe = cq_attr->cqe, in mlx4_create_cq_ex()
577 int mlx4_resize_cq(struct ibv_cq *ibcq, int cqe) in mlx4_resize_cq() argument
586 if (cqe > 0x3fffff) in mlx4_resize_cq()
591 cqe = align_queue_size(cqe + 1); in mlx4_resize_cq()
592 if (cqe == ibcq->cqe + 1) { in mlx4_resize_cq()
599 if (cqe < outst_cqe + 1) { in mlx4_resize_cq()
[all …]
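
The create_cq() and mlx4_resize_cq() hits above follow one pattern: reject requests above 0x3fffff, round the requested count plus one up to a power of two, then report one less back to the caller. align_queue_size() below is a plausible reconstruction, not the library's exact helper; the +1/-1 dance presumably reserves one slot so the count handed back is what the caller can actually have outstanding:

#include <stdint.h>

/*
 * Round req up to the next power of two.  Completion rings must be
 * power-of-two sized so the consumer index can wrap with a plain mask.
 */
static uint32_t
align_queue_size(uint32_t req)
{
	uint32_t nent = 1;

	while (nent < req)
		nent <<= 1;
	return (nent);
}

/*
 * Usage mirroring create_cq() above:
 *	nent = align_queue_size(requested + 1);	allocate nent entries,
 *	then report nent - 1 as the usable CQE count.
 */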
/freebsd/sys/dev/mthca/
mthca_cq.c
176 return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe; in cqe_sw()
195 be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]), in dump_cqe()
196 be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]), in dump_cqe()
197 be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7])); in dump_cqe()
309 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in mthca_cq_clean()
341 cq->ibcq.cqe < cq->resize_buf->cqe) { in mthca_cq_resize_copy_cqes()
388 be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe), in handle_error_cqe()
499 if (!cqe) in mthca_poll_one()
517 is_send = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80; in mthca_poll_one()
651 set_cqe_hw(cqe); in mthca_poll_one()
[all …]
/freebsd/sys/dev/mlx4/mlx4_ib/
mlx4_ib_cq.c
82 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
86 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; in get_sw_cqe()
351 cqe = get_cqe(cq, i & cq->ibcq.cqe); in mlx4_ib_cq_resize_copy_cqes()
362 cqe = get_cqe(cq, ++i & cq->ibcq.cqe); in mlx4_ib_cq_resize_copy_cqes()
419 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_resize_cq()
436 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_resize_cq()
667 if (!cqe) in mlx4_ib_poll_one()
671 cqe++; in mlx4_ib_poll_one()
692 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_poll_one()
937 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in __mlx4_ib_cq_clean()
[all …]
/freebsd/contrib/ofed/libcxgb4/
cq.c
48 memset(&cqe, 0, sizeof(cqe)); in insert_recv_cqe()
81 memset(&cqe, 0, sizeof(cqe)); in insert_sq_cqe()
289 if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe)) in cqe_completes_wr()
292 if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe)) in cqe_completes_wr()
295 if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq)) in cqe_completes_wr()
610 CQE_QPID(&cqe), CQE_TYPE(&cqe), in c4iw_poll_cq_one()
611 CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_WRID_HI(&cqe), in c4iw_poll_cq_one()
641 CQE_OPCODE(&cqe), CQE_QPID(&cqe)); in c4iw_poll_cq_one()
696 CQE_STATUS(&cqe), CQE_QPID(&cqe)); in c4iw_poll_cq_one()
703 chp->cq.cqid, CQE_QPID(&cqe), CQE_TYPE(&cqe), in c4iw_poll_cq_one()
[all …]
/freebsd/contrib/ofed/libmlx5/
cq.c
83 void *cqe = get_cqe(cq, n & cq->ibv_cq.cqe); in get_sw_cqe() local
86 cqe64 = (cq->cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
90 return cqe; in get_sw_cqe()
471 void *cqe; in mlx5_get_next_cqe() local
475 if (!cqe) in mlx5_get_next_cqe()
478 cqe64 = (cq->cqe_sz == 64) ? cqe : cqe + 64; in mlx5_get_next_cqe()
719 void *cqe; in mlx5_poll_one() local
836 void *cqe; in mlx5_start_poll() local
905 void *cqe; in mlx5_next_poll() local
1392 cqe = get_cqe(cq, prod_index & cq->ibv_cq.cqe); in __mlx5_cq_clean()
[all …]
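
Several hits in this file (get_sw_cqe, mlx5_get_next_cqe) repeat the expression (cq->cqe_sz == 64) ? cqe : cqe + 64: when the device is configured for 128-byte CQEs, the 64-byte completion structure sits in the second half of each stride. A sketch of that indexing, with an illustrative 64-byte stand-in type:

#include <stddef.h>
#include <stdint.h>

struct cqe64 {
	uint8_t	data[64];	/* stand-in for the real mlx5_cqe64 */
};

/*
 * Return the 64-byte completion inside entry n.  With 128-byte CQEs
 * the meaningful half lives at offset 64 of each stride, which is what
 * the "cqe + 64" in the hits above accounts for.
 */
static struct cqe64 *
cqe64_of(void *buf, uint32_t n, uint32_t cqe_sz)
{
	uint8_t *entry = (uint8_t *)buf + (size_t)n * cqe_sz;

	return ((struct cqe64 *)(cqe_sz == 64 ? entry : entry + 64));
}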
mlx5dv.h
312 uint8_t mlx5dv_get_cqe_owner(struct mlx5_cqe64 *cqe) in mlx5dv_get_cqe_owner() argument
314 return cqe->op_own & 0x1; in mlx5dv_get_cqe_owner()
318 void mlx5dv_set_cqe_owner(struct mlx5_cqe64 *cqe, uint8_t val) in mlx5dv_set_cqe_owner() argument
320 cqe->op_own = (val & 0x1) | (cqe->op_own & ~0x1); in mlx5dv_set_cqe_owner()
325 uint8_t mlx5dv_get_cqe_se(struct mlx5_cqe64 *cqe) in mlx5dv_get_cqe_se() argument
327 return (cqe->op_own >> 1) & 0x1; in mlx5dv_get_cqe_se()
331 uint8_t mlx5dv_get_cqe_format(struct mlx5_cqe64 *cqe) in mlx5dv_get_cqe_format() argument
333 return (cqe->op_own >> 2) & 0x3; in mlx5dv_get_cqe_format()
337 uint8_t mlx5dv_get_cqe_opcode(struct mlx5_cqe64 *cqe) in mlx5dv_get_cqe_opcode() argument
339 return cqe->op_own >> 4; in mlx5dv_get_cqe_opcode()
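
Taken together, these mlx5dv getters document the op_own bit layout: owner in bit 0, solicited-event in bit 1, format in bits 2-3, opcode in bits 4-7. A self-contained decode of a sample byte using the same shifts and masks as the getters above:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint8_t op_own = 0xa5;	/* arbitrary example value */

	printf("owner=%d se=%d format=%d opcode=%d\n",
	    op_own & 0x1,		/* mlx5dv_get_cqe_owner */
	    (op_own >> 1) & 0x1,	/* mlx5dv_get_cqe_se */
	    (op_own >> 2) & 0x3,	/* mlx5dv_get_cqe_format */
	    op_own >> 4);		/* mlx5dv_get_cqe_opcode */
	return (0);
}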
/freebsd/sys/dev/cxgbe/iw_cxgbe/
cq.c
208 memset(&cqe, 0, sizeof(cqe)); in insert_recv_cqe()
241 memset(&cqe, 0, sizeof(cqe)); in insert_sq_cqe()
443 if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe)) in cqe_completes_wr()
446 if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe)) in cqe_completes_wr()
449 if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq)) in cqe_completes_wr()
464 if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) && in c4iw_count_rcqes()
647 *cqe = *hw_cqe; in poll_cq()
745 __func__, CQE_QPID(&cqe), CQE_TYPE(&cqe), CQE_OPCODE(&cqe), in c4iw_poll_cq_one()
748 __func__, CQE_LEN(&cqe), CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), in c4iw_poll_cq_one()
798 CQE_OPCODE(&cqe), CQE_QPID(&cqe)); in c4iw_poll_cq_one()
[all …]
t4.h
288 struct t4_cqe cqe; member
635 static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe) in t4_valid_cqe() argument
637 return (CQE_GENBIT(cqe) == cq->gen); in t4_valid_cqe()
645 static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) in t4_next_hw_cqe() argument
664 *cqe = &cq->queue[cq->cidx]; in t4_next_hw_cqe()
685 static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe) in t4_next_cqe() argument
692 *cqe = &cq->sw_queue[cq->sw_cidx]; in t4_next_cqe()
694 ret = t4_next_hw_cqe(cq, cqe); in t4_next_cqe()
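
t4_valid_cqe() above is the cxgbe generation-bit test: hardware stamps each CQE with the current generation, software flips the expected bit every time the consumer index wraps, and stale entries from the previous lap therefore fail the comparison. A minimal sketch of the scheme with illustrative types (not the real t4_cq layout):

#include <stddef.h>
#include <stdint.h>

struct sim_cqe {
	uint8_t	genbit;		/* written by hardware with its current gen */
};

struct sim_cq {
	struct sim_cqe	*queue;
	uint32_t	size;
	uint32_t	cidx;	/* consumer index */
	uint8_t		gen;	/* generation bit software expects next */
};

static int
cqe_valid(const struct sim_cq *cq, const struct sim_cqe *cqe)
{
	return (cqe->genbit == cq->gen);
}

/* Return the next valid entry, or NULL if hardware has not produced one. */
static const struct sim_cqe *
next_cqe(struct sim_cq *cq)
{
	const struct sim_cqe *cqe = &cq->queue[cq->cidx];

	if (!cqe_valid(cq, cqe))
		return (NULL);
	if (++cq->cidx == cq->size) {
		cq->cidx = 0;
		cq->gen ^= 1;	/* wrapped: expect the opposite generation */
	}
	return (cqe);
}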
/freebsd/sys/dev/mlx5/mlx5_en/
mlx5_en_rx.c
166 if (get_cqe_lro_tcppsh(cqe)) in mlx5e_lro_update_hdr()
172 th->th_win = cqe->lro_tcp_win; in mlx5e_lro_update_hdr()
288 mlx5e_lro_update_hdr(mb, cqe); in mlx5e_build_rx_mbuf()
311 if (cqe->rss_hash_type != 0) { in mlx5e_build_rx_mbuf()
315 switch (cqe->rss_hash_type & in mlx5e_build_rx_mbuf()
345 if (cqe_is_tunneled(cqe)) in mlx5e_build_rx_mbuf()
355 if (cqe_is_tunneled(cqe)) { in mlx5e_build_rx_mbuf()
388 if (cqe_has_vlan(cqe)) { in mlx5e_build_rx_mbuf()
506 struct mlx5_cqe64 *cqe; in mlx5e_poll_rx_cq() local
512 cqe = mlx5e_get_cqe(&rq->cq); in mlx5e_poll_rx_cq()
[all …]
mlx5_en_txrx.c
36 struct mlx5_cqe64 *cqe; in mlx5e_get_cqe() local
38 cqe = mlx5_cqwq_get_wqe(&cq->wq, mlx5_cqwq_get_ci(&cq->wq)); in mlx5e_get_cqe()
40 if ((cqe->op_own ^ mlx5_cqwq_get_wrap_cnt(&cq->wq)) & MLX5_CQE_OWNER_MASK) in mlx5e_get_cqe()
46 return (cqe); in mlx5e_get_cqe()
/freebsd/sys/dev/mlx5/mlx5_ib/
mlx5_ib_cq.c
81 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
84 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
88 return cqe; in get_sw_cqe()
530 void *cqe; in mlx5_poll_one() local
535 if (!cqe) in mlx5_poll_one()
538 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in mlx5_poll_one()
843 void *cqe; in init_cq_buf() local
848 cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64; in init_cq_buf()
1037 void *cqe, *dest; in __mlx5_ib_cq_clean() local
1059 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
[all …]
/freebsd/sys/dev/oce/
oce_if.c
1404 while (cqe->u0.dw[3]) { in oce_wq_handler()
1413 cqe->u0.dw[3] = 0; in oce_wq_handler()
1417 cqe = in oce_wq_handler()
1711 if (cqe->u0.s.qnq) { in oce_rx()
2057 while (cqe->u0.dw[2]) { in oce_rq_handler()
2061 oce_rx(rq, cqe); in oce_rq_handler()
2066 oce_rx(rq, cqe); in oce_rq_handler()
2069 cqe->u0.dw[2] = 0; in oce_rq_handler()
2080 cqe = in oce_rq_handler()
2663 struct oce_mq_cqe *cqe; in oce_mq_handler() local
[all …]
oce_queue.c
1024 struct oce_nic_tx_cqe *cqe; in oce_drain_wq_cq() local
1032 if (cqe->u0.dw[3] == 0) in oce_drain_wq_cq()
1034 cqe->u0.dw[3] = 0; in oce_drain_wq_cq()
1067 struct oce_nic_rx_cqe *cqe; in oce_drain_rq_cq() local
1076 while (RQ_CQE_VALID(cqe)) { in oce_drain_rq_cq()
1077 RQ_CQE_INVALIDATE(cqe); in oce_drain_rq_cq()
1122 if(cqe->valid) { in oce_rx_cq_clean_hwlro()
1130 if(cqe->pkt_size % rq->cfg.frag_size) in oce_rx_cq_clean_hwlro()
1188 struct oce_nic_rx_cqe *cqe; in oce_rx_cq_clean() local
1200 if(RQ_CQE_VALID(cqe)) { in oce_rx_cq_clean()
[all …]
/freebsd/sys/dev/nvmf/host/
nvmf_aer.c
35 static void nvmf_complete_aer(void *arg, const struct nvme_completion *cqe);
124 nvmf_complete_aer_page(void *arg, const struct nvme_completion *cqe) in nvmf_complete_aer_page() argument
130 aer->status = cqe->status; in nvmf_complete_aer_page()
154 nvmf_complete_aer(void *arg, const struct nvme_completion *cqe) in nvmf_complete_aer() argument
168 if (cqe->status != 0) { in nvmf_complete_aer()
169 if (!nvmf_cqe_aborted(cqe)) in nvmf_complete_aer()
171 le16toh(cqe->status)); in nvmf_complete_aer()
175 cdw0 = le32toh(cqe->cdw0); in nvmf_complete_aer()
230 if (status.cqe.status != 0) { in nvmf_set_async_event_config()
233 le16toh(status.cqe.status)); in nvmf_set_async_event_config()
nvmf_qpair.c
92 struct nvme_completion cqe; in nvmf_abort_request() local
94 memset(&cqe, 0, sizeof(cqe)); in nvmf_abort_request()
95 cqe.cid = cid; in nvmf_abort_request()
96 cqe.status = htole16(NVMEF(NVME_STATUS_SCT, NVME_SCT_PATH_RELATED) | in nvmf_abort_request()
98 req->cb(req->cb_arg, &cqe); in nvmf_abort_request()
158 const struct nvme_completion *cqe; in nvmf_receive_capsule() local
161 cqe = nvmf_capsule_cqe(nc); in nvmf_receive_capsule()
170 cid = cqe->cid; in nvmf_receive_capsule()
219 req->cb(req->cb_arg, cqe); in nvmf_receive_capsule()
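
nvmf_abort_request() above shows how the host fabricates a completion when a queue pair dies, so the request's callback still runs: zero the CQE, stamp the command id, and encode a path-related status. A sketch of the pattern; the struct and the SCT bit position here are illustrative (the real layout and NVMEF() macro live in nvme.h, and the real code stores the status little-endian via htole16), so only the shape of the trick is the point:

#include <stdint.h>
#include <string.h>

#define SCT_SHIFT		9	/* assumed position of the status code type */
#define SCT_PATH_RELATED	0x3	/* NVMe path-related status code type */

struct completion {
	uint32_t	cdw0;
	uint16_t	cid;	/* command identifier being aborted */
	uint16_t	status;	/* status code type + status code */
};

typedef void (*req_cb)(void *arg, const struct completion *cqe);

static void
abort_request(req_cb cb, void *cb_arg, uint16_t cid)
{
	struct completion cqe;

	memset(&cqe, 0, sizeof(cqe));
	cqe.cid = cid;
	cqe.status = (uint16_t)(SCT_PATH_RELATED << SCT_SHIFT);
	cb(cb_arg, &cqe);	/* complete as if the fabric path failed */
}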
nvmf.c
36 status->cqe = *cqe; in nvmf_complete()
83 if (status.cqe.status != 0) { in nvmf_read_property()
85 le16toh(status.cqe.status)); in nvmf_read_property()
109 if (status.cqe.status != 0) { in nvmf_write_property()
111 le16toh(status.cqe.status)); in nvmf_write_property()
160 if (cqe->status != 0) { in nvmf_keep_alive_complete()
163 le16toh(cqe->status)); in nvmf_keep_alive_complete()
315 if (status.cqe.status != 0) { in nvmf_scan_nslist()
354 if (status.cqe.status != 0) { in nvmf_scan_nslist()
736 if (status.cqe.status != 0) { in nvmf_rescan_ns()
[all …]
nvmf_var.h
98 struct nvme_completion cqe; member
112 nvmf_cqe_aborted(const struct nvme_completion *cqe) in nvmf_cqe_aborted() argument
116 status = le16toh(cqe->status); in nvmf_cqe_aborted()
144 void nvmf_complete(void *arg, const struct nvme_completion *cqe);
/freebsd/sys/dev/mlx5/
device.h
720 return (cqe->op_own >> 4); in get_cqe_opcode()
725 return (cqe->lro_tcppsh_abort_dupack >> 7) & 1; in get_cqe_lro_timestamp_valid()
730 return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; in get_cqe_lro_tcppsh()
735 return (cqe->l4_hdr_type_etc >> 4) & 0x7; in get_cqe_l4_hdr_type()
740 return be16_to_cpu(cqe->vlan_info) & 0xfff; in get_cqe_vlan()
745 memcpy(smac, &cqe->rss_hash_type , 4); in get_cqe_smac()
746 memcpy(smac + 4, &cqe->slid , 2); in get_cqe_smac()
751 return cqe->l4_hdr_type_etc & 0x1; in cqe_has_vlan()
756 return cqe->tls_outer_l3_tunneled & 0x1; in cqe_is_tunneled()
761 return (cqe->tls_outer_l3_tunneled >> 3) & 0x3; in get_cqe_tls_offload()
[all …]
/freebsd/sys/dev/nvmf/controller/
nvmft_qpair.c
172 _nvmft_send_response(struct nvmft_qpair *qp, const void *cqe) in _nvmft_send_response() argument
179 memcpy(&cpl, cqe, sizeof(cpl)); in _nvmft_send_response()
218 nvmft_send_response(struct nvmft_qpair *qp, const void *cqe) in nvmft_send_response() argument
220 const struct nvme_completion *cpl = cqe; in nvmft_send_response()
227 return (_nvmft_send_response(qp, cqe)); in nvmft_send_response()
231 nvmft_init_cqe(void *cqe, struct nvmf_capsule *nc, uint16_t status) in nvmft_init_cqe() argument
233 struct nvme_completion *cpl = cqe; in nvmft_init_cqe()
/freebsd/sys/dev/mlx4/mlx4_en/
mlx4_en_rx.c
604 struct mlx4_cqe *cqe) in invalid_cqe() argument
611 ((struct mlx4_err_cqe *)cqe)->syndrome); in invalid_cqe()
614 if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) { in invalid_cqe()
741 struct mlx4_cqe *cqe; in mlx4_en_process_rx_cq() local
764 cqe = &buf[CQE_FACTOR_INDEX(index, factor)]; in mlx4_en_process_rx_cq()
777 if (invalid_cqe(priv, cqe)) { in mlx4_en_process_rx_cq()
783 length = be32_to_cpu(cqe->byte_cnt); in mlx4_en_process_rx_cq()
804 if (be32_to_cpu(cqe->vlan_my_qpn) & in mlx4_en_process_rx_cq()
812 (cqe->checksum == cpu_to_be16(0xffff))) { in mlx4_en_process_rx_cq()
825 if (mlx4_en_can_lro(cqe->status) && in mlx4_en_process_rx_cq()
[all …]
/freebsd/sys/dev/irdma/
irdma_uk.c
1104 __le64 *cqe, *wqe; in irdma_detect_unsignaled_cmpls() local
1109 cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq); in irdma_detect_unsignaled_cmpls()
1152 __le64 *cqe; in irdma_uk_cq_poll_cmpl() local
1165 cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq); in irdma_uk_cq_poll_cmpl()
1167 get_64bit_val(cqe, IRDMA_BYTE_24, &qword3); in irdma_uk_cq_poll_cmpl()
1181 ext_cqe = (__le64 *) ((u8 *)cqe + 32); in irdma_uk_cq_poll_cmpl()
1253 get_64bit_val(cqe, IRDMA_BYTE_0, &qword0); in irdma_uk_cq_poll_cmpl()
1254 get_64bit_val(cqe, IRDMA_BYTE_16, &qword2); in irdma_uk_cq_poll_cmpl()
1717 __le64 *cqe; in irdma_uk_clean_cq() local
1728 cqe = cq->cq_base[cq_head].buf; in irdma_uk_clean_cq()
[all …]
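
irdma_uk_cq_poll_cmpl() above reads CQE fields as 64-bit words at fixed byte offsets (IRDMA_BYTE_0, IRDMA_BYTE_16, IRDMA_BYTE_24). A sketch of what get_64bit_val() is assumed to do: load the little-endian qword that starts at the given byte offset and return it in host order (the real helper's signature may differ):

#include <stdint.h>
#include <string.h>

static void
get_64bit_val(const void *cqe, uint32_t off, uint64_t *val)
{
	uint64_t le;

	/* memcpy keeps the load alignment-safe. */
	memcpy(&le, (const uint8_t *)cqe + off, sizeof(le));
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	le = __builtin_bswap64(le);	/* CQE words are little-endian */
#endif
	*val = le;
}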
/freebsd/sys/dev/mlx5/mlx5_fpga/
mlx5fpga_conn.c
248 struct mlx5_cqe64 *cqe, u8 status) in mlx5_fpga_conn_rq_cqe() argument
289 struct mlx5_cqe64 *cqe, u8 status) in mlx5_fpga_conn_sq_cqe() argument
331 struct mlx5_cqe64 *cqe) in mlx5_fpga_conn_handle_cqe() argument
335 opcode = cqe->op_own >> 4; in mlx5_fpga_conn_handle_cqe()
342 mlx5_fpga_conn_sq_cqe(conn, cqe, status); in mlx5_fpga_conn_handle_cqe()
349 mlx5_fpga_conn_rq_cqe(conn, cqe, status); in mlx5_fpga_conn_handle_cqe()
383 struct mlx5_cqe64 *cqe; in mlx5_fpga_conn_cqes() local
386 cqe = mlx5_cqwq_get_cqe(&conn->cq.wq); in mlx5_fpga_conn_cqes()
387 if (!cqe) in mlx5_fpga_conn_cqes()
392 mlx5_fpga_conn_handle_cqe(conn, cqe); in mlx5_fpga_conn_cqes()
[all …]
/freebsd/sys/ofed/drivers/infiniband/core/
ib_uverbs_std_types_cq.c
80 ret = uverbs_copy_from(&attr.cqe, attrs, in UVERBS_HANDLER()
131 ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe, in UVERBS_HANDLER()
132 sizeof(cq->cqe)); in UVERBS_HANDLER()
/freebsd/contrib/ofed/libibverbs/
compat-1_0.c
140 int cqe; member
175 struct ibv_cq * (*create_cq)(struct ibv_context *context, int cqe,
183 int (*resize_cq)(struct ibv_cq *cq, int cqe);
256 struct ibv_cq_1_0 *__ibv_create_cq_1_0(struct ibv_context_1_0 *context, int cqe,
260 int __ibv_resize_cq_1_0(struct ibv_cq_1_0 *cq, int cqe);
739 struct ibv_cq_1_0 *__ibv_create_cq_1_0(struct ibv_context_1_0 *context, int cqe, in __ibv_create_cq_1_0() argument
751 real_cq = ibv_create_cq(context->real_context, cqe, cq_context, in __ibv_create_cq_1_0()
760 cq->cqe = cqe; in __ibv_create_cq_1_0()
769 int __ibv_resize_cq_1_0(struct ibv_cq_1_0 *cq, int cqe) in __ibv_resize_cq_1_0() argument
771 return ibv_resize_cq(cq->real_cq, cqe); in __ibv_resize_cq_1_0()
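
__ibv_create_cq_1_0() above is the libibverbs 1.0 ABI shim: create the real CQ through the modern entry point, then wrap it in a structure with the old layout that legacy binaries dereference. A stripped-down sketch of the wrapper pattern with hypothetical types:

#include <stdlib.h>

struct real_cq;				/* stands in for struct ibv_cq */

struct cq_1_0 {				/* legacy layout old binaries expect */
	int		cqe;		/* count the 1.0 caller asked for */
	void		*cq_context;
	struct real_cq	*real_cq;	/* object the new ABI operates on */
};

static struct cq_1_0 *
create_cq_1_0(struct real_cq *(*create)(int cqe), int cqe, void *cq_context)
{
	struct cq_1_0 *cq = malloc(sizeof(*cq));

	if (cq == NULL)
		return (NULL);
	cq->real_cq = create(cqe);
	if (cq->real_cq == NULL) {
		free(cq);
		return (NULL);
	}
	cq->cqe = cqe;
	cq->cq_context = cq_context;
	return (cq);
}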
