/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 */

#ifndef DEF_RDMAVT_INCCQ_H
#define DEF_RDMAVT_INCCQ_H

#include <linux/kthread.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define RVT_CQ_NONE      (IB_CQ_NEXT_COMP + 1)

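/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * RVT_CQ_NONE is deliberately outside the valid enum ib_cq_notify_flags
 * range, so a provider can store it in rvt_cq.notify to mean "no rearm
 * pending" and decide per completion whether an event should fire.
 * (A real implementation may also promote error completions to
 * solicited.)
 */
static inline bool rvt_example_should_notify(u8 notify, bool solicited)
{
	if (notify == RVT_CQ_NONE)
		return false;	/* CQ is not armed; complete silently */
	if (notify == IB_CQ_NEXT_COMP)
		return true;	/* armed for any completion */
	/* otherwise armed for solicited completions only */
	return notify == IB_CQ_SOLICITED && solicited;
}
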
/*
 * Define a read macro that applies an smp_load_acquire() memory barrier
 * when reading an index of a circular buffer that is mmapped to user space.
 */
#define RDMA_READ_UAPI_ATOMIC(member) smp_load_acquire(&(member).val)

/*
 * Define a write macro that applies an smp_store_release() memory barrier
 * when writing an index of a circular buffer that is mmapped to user space.
 */
#define RDMA_WRITE_UAPI_ATOMIC(member, x) smp_store_release(&(member).val, x)
#include <rdma/rvt-abi.h>

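/*
 * Illustrative sketch (hypothetical, not part of this header), assuming
 * the struct rvt_cq_wc layout from <rdma/rvt-abi.h> (head/tail UAPI
 * atomics plus a uqueue[] of struct ib_uverbs_wc): draining one entry
 * from the user-mmapped ring.  The acquire load of head pairs with the
 * producer's release store, so the entry at tail is fully visible before
 * it is copied; the release store of tail then publishes the consumed
 * slot back to the producer.
 */
static inline int rvt_example_poll_one(struct rvt_cq_wc *u_wc, u32 depth,
				       struct ib_uverbs_wc *entry)
{
	u32 tail = RDMA_READ_UAPI_ATOMIC(u_wc->tail);

	if (tail == RDMA_READ_UAPI_ATOMIC(u_wc->head))
		return 0;			/* ring is empty */
	*entry = u_wc->uqueue[tail];
	if (++tail >= depth)
		tail = 0;			/* wrap around */
	RDMA_WRITE_UAPI_ATOMIC(u_wc->tail, tail);
	return 1;
}
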
/*
 * This structure contains the head pointer, tail pointer, and
 * completion queue entries as a single memory allocation.  It is the
 * kernel-only variant of the completion queue ring; the variant that
 * is mmapped into user space (struct rvt_cq_wc) comes from
 * <rdma/rvt-abi.h> above.
 */
struct rvt_k_cq_wc {
	u32 head;               /* index of next entry to fill */
	u32 tail;               /* index of next ib_poll_cq() entry */
	struct ib_wc kqueue[];
};

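/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * index arithmetic for the ring above.  By convention the entry array
 * is sized one larger than the requested CQ depth, so head == tail
 * means "empty" and one slot always stays unused to distinguish a full
 * ring from an empty one.
 */
static inline u32 rvt_example_ring_next(u32 idx, u32 depth)
{
	return (idx + 1 >= depth) ? 0 : idx + 1;
}
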
/*
 * The completion queue structure.
 */
struct rvt_cq {
	struct ib_cq ibcq;
	struct work_struct comptask;	/* work item for completion events */
	spinlock_t lock; /* protect changes in this struct */
	u8 notify;	/* armed notify type; RVT_CQ_NONE when unarmed */
	u8 triggered;
	u8 cq_full;	/* set once the CQ has overflowed */
	int comp_vector_cpu;
	struct rvt_dev_info *rdi;
	struct rvt_cq_wc *queue;	/* ring shared with user space */
	struct rvt_mmap_info *ip;	/* mmap info for the user ring */
	struct rvt_k_cq_wc *kqueue;	/* kernel-only ring */
};

static inline struct rvt_cq *ibcq_to_rvtcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct rvt_cq, ibcq);
}

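/*
 * Illustrative sketch (hypothetical, not from the kernel sources): a
 * verbs callback receives the core's struct ib_cq and recovers the
 * rdmavt-private state with ibcq_to_rvtcq() before touching it under
 * cq->lock.  Rearming here mirrors the RVT_CQ_NONE convention above.
 */
static inline void rvt_example_rearm(struct ib_cq *ibcq,
				     enum ib_cq_notify_flags flags)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	unsigned long irq_flags;

	spin_lock_irqsave(&cq->lock, irq_flags);
	/* do not downgrade an existing "any completion" request */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;
	spin_unlock_irqrestore(&cq->lock, irq_flags);
}
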
bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited);

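/*
 * Illustrative sketch (hypothetical, not from the kernel sources): an
 * rdmavt-based driver reports a completed work request by filling in a
 * struct ib_wc and handing it to rvt_cq_enter().  The "solicited"
 * argument feeds the arming check, so an unsolicited completion only
 * raises an event if the CQ is armed for any completion.
 */
static inline void rvt_example_send_complete(struct rvt_cq *cq, u64 wr_id)
{
	struct ib_wc wc = {
		.wr_id = wr_id,
		.status = IB_WC_SUCCESS,
		.opcode = IB_WC_SEND,
	};

	rvt_cq_enter(cq, &wc, false);	/* unsolicited completion */
}
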
#endif          /* DEF_RDMAVT_INCCQ_H */