/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_QUEUE_H
#define RXE_QUEUE_H

/* Implements a simple circular buffer that is shared between the user
 * and the driver and can be resized. The requested element size is
 * rounded up to a power of 2 and the number of elements in the buffer
 * is also rounded up to a power of 2. Since the queue is empty when
 * the producer and consumer indices match, the maximum capacity of the
 * queue is one less than the number of element slots.
 *
 * Notes:
 *   - The driver indices are always masked to q->index_mask
 *     before storing, so they do not need to be checked on reads.
 *   - The user, whether user space or a kernel client, is generally
 *     not trusted, so its parameters are masked to make sure it
 *     does not access the queue out of bounds on reads.
 *   - The driver indices for queues must not be written by the
 *     user, so a local copy is used and a shared copy is stored
 *     when the local copy is changed.
 *   - By passing the type in the parameter list separately from q,
 *     the compiler can eliminate the switch statement when the
 *     actual queue type is known at compile time at the call site
 *     (see the illustration after the enum below).
 *   - These queues are lock free. The user and driver must protect
 *     changes to their end of the queue with locks if more than one
 *     CPU can access it at the same time.
 */
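
/* Worked example (illustrative only; the sizes here are assumed, not
 * taken from any caller): a queue created with 100 requested elements
 * is rounded up to 128 slots, so index_mask == 127 and at most 127
 * elements can be stored. With prod == 5 and cons == 120 (indices are
 * always stored masked):
 *
 *	count: (5 - 120) & 127 == 13		thirteen elements queued
 *	empty: ((5 - 120) & 127) == 0		false
 *	full:  ((5 + 1 - 120) & 127) == 0	false
 *
 * The unsigned subtraction wraps correctly across the end of the
 * buffer because the slot count is a power of 2.
 */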

/**
 * enum queue_type - type of queue
 * @QUEUE_TYPE_TO_CLIENT:	Queue is written by rxe driver and
 *				read by client. Used by rxe driver only.
 * @QUEUE_TYPE_FROM_CLIENT:	Queue is written by client and
 *				read by rxe driver. Used by rxe driver only.
 * @QUEUE_TYPE_TO_DRIVER:	Queue is written by client and
 *				read by rxe driver. Used by kernel client only.
 * @QUEUE_TYPE_FROM_DRIVER:	Queue is written by rxe driver and
 *				read by client. Used by kernel client only.
 */
enum queue_type {
	QUEUE_TYPE_TO_CLIENT,
	QUEUE_TYPE_FROM_CLIENT,
	QUEUE_TYPE_TO_DRIVER,
	QUEUE_TYPE_FROM_DRIVER,
};
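
/* Illustration of the constant-propagation note above (the caller shown
 * is hypothetical, not part of this header): because the queue type is
 * passed as a separate argument that is a compile-time constant at the
 * call site, the switch statements in the inline helpers below collapse
 * to a single case, e.g.
 *
 *	u32 prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
 *
 * reduces to just the smp_load_acquire() of q->buf->producer_index.
 */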

struct rxe_queue_buf;

struct rxe_queue {
	struct rxe_dev		*rxe;
	struct rxe_queue_buf	*buf;
	struct rxe_mmap_info	*ip;
	size_t			buf_size;
	size_t			elem_size;
	unsigned int		log2_elem_size;
	u32			index_mask;
	enum queue_type		type;
	/* Private copy of the index for queues shared between
	 * kernel space and user space. The kernel reads and writes
	 * this copy and then replicates it to rxe_queue_buf for
	 * read access by user space; see the sketch below this
	 * struct.
	 */
	u32			index;
};
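
/* The private index is updated as in this sketch (it mirrors the
 * QUEUE_TYPE_TO_CLIENT case of queue_advance_producer() below and is
 * shown here only to illustrate the comment above):
 *
 *	prod = q->index;			(driver-owned copy)
 *	prod = (prod + 1) & q->index_mask;
 *	q->index = prod;
 *	smp_store_release(&q->buf->producer_index, prod);
 *
 * User space can read producer_index from the shared buffer but can
 * never corrupt the driver's private view of it.
 */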

int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
		 struct ib_udata *udata, struct rxe_queue_buf *buf,
		 size_t buf_size, struct rxe_mmap_info **ip_p);

void rxe_queue_reset(struct rxe_queue *q);

struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
			unsigned int elem_size, enum queue_type type);

int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
		     unsigned int elem_size, struct ib_udata *udata,
		     struct mminfo __user *outbuf,
		     spinlock_t *producer_lock, spinlock_t *consumer_lock);

void rxe_queue_cleanup(struct rxe_queue *queue);

static inline u32 queue_next_index(struct rxe_queue *q, int index)
{
	return (index + 1) & q->index_mask;
}

static inline u32 queue_get_producer(const struct rxe_queue *q,
				     enum queue_type type)
{
	u32 prod;

	switch (type) {
	case QUEUE_TYPE_FROM_CLIENT:
		/* protect user index */
		prod = smp_load_acquire(&q->buf->producer_index);
		break;
	case QUEUE_TYPE_TO_CLIENT:
		prod = q->index;
		break;
	case QUEUE_TYPE_FROM_DRIVER:
		/* protect driver index */
		prod = smp_load_acquire(&q->buf->producer_index);
		break;
	case QUEUE_TYPE_TO_DRIVER:
		prod = q->buf->producer_index;
		break;
	}

	return prod;
}

static inline u32 queue_get_consumer(const struct rxe_queue *q,
				     enum queue_type type)
{
	u32 cons;

	switch (type) {
	case QUEUE_TYPE_FROM_CLIENT:
		cons = q->index;
		break;
	case QUEUE_TYPE_TO_CLIENT:
		/* protect user index */
		cons = smp_load_acquire(&q->buf->consumer_index);
		break;
	case QUEUE_TYPE_FROM_DRIVER:
		cons = q->buf->consumer_index;
		break;
	case QUEUE_TYPE_TO_DRIVER:
		/* protect driver index */
		cons = smp_load_acquire(&q->buf->consumer_index);
		break;
	}

	return cons;
}

static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
{
	u32 prod = queue_get_producer(q, type);
	u32 cons = queue_get_consumer(q, type);

	return ((prod - cons) & q->index_mask) == 0;
}

static inline int queue_full(struct rxe_queue *q, enum queue_type type)
{
	u32 prod = queue_get_producer(q, type);
	u32 cons = queue_get_consumer(q, type);

	return ((prod + 1 - cons) & q->index_mask) == 0;
}

static inline u32 queue_count(const struct rxe_queue *q,
			      enum queue_type type)
{
	u32 prod = queue_get_producer(q, type);
	u32 cons = queue_get_consumer(q, type);

	return (prod - cons) & q->index_mask;
}

static inline void queue_advance_producer(struct rxe_queue *q,
					  enum queue_type type)
{
	u32 prod;

	switch (type) {
	case QUEUE_TYPE_FROM_CLIENT:
		pr_warn("%s: attempt to advance client index\n",
			__func__);
		break;
	case QUEUE_TYPE_TO_CLIENT:
		prod = q->index;
		prod = (prod + 1) & q->index_mask;
		q->index = prod;
		/* protect user index */
		smp_store_release(&q->buf->producer_index, prod);
		break;
	case QUEUE_TYPE_FROM_DRIVER:
		pr_warn("%s: attempt to advance driver index\n",
			__func__);
		break;
	case QUEUE_TYPE_TO_DRIVER:
		prod = q->buf->producer_index;
		prod = (prod + 1) & q->index_mask;
		q->buf->producer_index = prod;
		break;
	}
}
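
/* Typical producer-side use of these helpers (a sketch only; the queue
 * type, element type and error handling are assumptions, not taken
 * from a real caller):
 *
 *	if (queue_full(q, QUEUE_TYPE_TO_CLIENT))
 *		return -EBUSY;
 *	cqe = queue_producer_addr(q, QUEUE_TYPE_TO_CLIENT);
 *	memcpy(cqe, &tmp, sizeof(tmp));
 *	queue_advance_producer(q, QUEUE_TYPE_TO_CLIENT);
 *
 * The smp_store_release() above pairs with the smp_load_acquire() the
 * consumer performs on producer_index, so the element contents are
 * published before the index update becomes visible.
 */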

static inline void queue_advance_consumer(struct rxe_queue *q,
					  enum queue_type type)
{
	u32 cons;

	switch (type) {
	case QUEUE_TYPE_FROM_CLIENT:
		cons = q->index;
		cons = (cons + 1) & q->index_mask;
		q->index = cons;
		/* protect user index */
		smp_store_release(&q->buf->consumer_index, cons);
		break;
	case QUEUE_TYPE_TO_CLIENT:
		pr_warn("%s: attempt to advance client index\n",
			__func__);
		break;
	case QUEUE_TYPE_FROM_DRIVER:
		cons = q->buf->consumer_index;
		cons = (cons + 1) & q->index_mask;
		q->buf->consumer_index = cons;
		break;
	case QUEUE_TYPE_TO_DRIVER:
		pr_warn("%s: attempt to advance driver index\n",
			__func__);
		break;
	}
}

static inline void *queue_producer_addr(struct rxe_queue *q,
					enum queue_type type)
{
	u32 prod = queue_get_producer(q, type);

	return q->buf->data + (prod << q->log2_elem_size);
}

static inline void *queue_consumer_addr(struct rxe_queue *q,
					enum queue_type type)
{
	u32 cons = queue_get_consumer(q, type);

	return q->buf->data + (cons << q->log2_elem_size);
}

static inline void *queue_addr_from_index(struct rxe_queue *q, u32 index)
{
	return q->buf->data + ((index & q->index_mask)
				<< q->log2_elem_size);
}

static inline u32 queue_index_from_addr(const struct rxe_queue *q,
				const void *addr)
{
	return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
				& q->index_mask;
}

static inline void *queue_head(struct rxe_queue *q, enum queue_type type)
{
	return queue_empty(q, type) ? NULL : queue_consumer_addr(q, type);
}

#endif /* RXE_QUEUE_H */