/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_WQ_H__
#define __MLX5_WQ_H__

#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>

struct mlx5_wq_param {
	int		buf_numa_node;
	int		db_numa_node;
};

struct mlx5_wq_ctrl {
	struct mlx5_core_dev	*mdev;
	struct mlx5_frag_buf	buf;
	struct mlx5_db		db;
};

struct mlx5_wq_cyc {
	struct mlx5_frag_buf_ctrl fbc;
	__be32			*db;
	u16			sz;
	u16			wqe_ctr;
	u16			cur_sz;
};

struct mlx5_wq_qp {
	struct mlx5_wq_cyc	rq;
	struct mlx5_wq_cyc	sq;
};

struct mlx5_cqwq {
	struct mlx5_frag_buf_ctrl fbc;
	__be32			  *db;
	u32			  cc; /* consumer counter */
};

struct mlx5_wq_ll {
	struct mlx5_frag_buf_ctrl fbc;
	__be32			*db;
	__be16			*tail_next;
	u16			head;
	u16			wqe_ctr;
	u16			cur_sz;
};

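/*
 * Note (added for clarity, not part of the original header): the work queues
 * above come in a few flavors.  mlx5_wq_cyc is a plain cyclic ring indexed by
 * a free-running wqe_ctr; mlx5_wq_qp bundles two such rings as a QP's RQ and
 * SQ; mlx5_cqwq is the completion queue ring driven by the consumer counter
 * cc; mlx5_wq_ll chains WQEs through a next-index field and keeps head/tail
 * pointers into that chain.
 */
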
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		       void *wqc, struct mlx5_wq_cyc *wq,
		       struct mlx5_wq_ctrl *wq_ctrl);
void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides);
void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq);

int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		      void *qpc, struct mlx5_wq_qp *wq,
		      struct mlx5_wq_ctrl *wq_ctrl);

int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		     void *cqc, struct mlx5_cqwq *wq,
		     struct mlx5_wq_ctrl *wq_ctrl);

int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
		      void *wqc, struct mlx5_wq_ll *wq,
		      struct mlx5_wq_ctrl *wq_ctrl);
void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq);

void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);

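/*
 * Usage sketch (illustrative, not part of the original header): each of the
 * *_create() helpers above is paired with mlx5_wq_destroy().  The wqc/cqc/qpc
 * argument is the firmware work-queue context (an mlx5_ifc layout) that the
 * caller has already populated; "priv", "wqc" and "mdev" below are
 * hypothetical caller-side names used only for illustration.
 *
 *	struct mlx5_wq_param param = {
 *		.buf_numa_node = NUMA_NO_NODE,	// real callers usually pass the device NUMA node
 *		.db_numa_node  = NUMA_NO_NODE,
 *	};
 *	int err;
 *
 *	err = mlx5_wq_cyc_create(mdev, &param, wqc, &priv->wq, &priv->wq_ctrl);
 *	if (err)
 *		return err;
 *	...
 *	mlx5_wq_destroy(&priv->wq_ctrl);
 */
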
static inline u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
{
	return (u32)wq->fbc.sz_m1 + 1;
}

static inline int mlx5_wq_cyc_is_full(struct mlx5_wq_cyc *wq)
{
	return wq->cur_sz == wq->sz;
}

static inline int mlx5_wq_cyc_missing(struct mlx5_wq_cyc *wq)
{
	return wq->sz - wq->cur_sz;
}

static inline int mlx5_wq_cyc_is_empty(struct mlx5_wq_cyc *wq)
{
	return !wq->cur_sz;
}

static inline void mlx5_wq_cyc_push(struct mlx5_wq_cyc *wq)
{
	wq->wqe_ctr++;
	wq->cur_sz++;
}

static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u16 n)
{
	wq->wqe_ctr += n;
	wq->cur_sz += n;
}

static inline void mlx5_wq_cyc_pop(struct mlx5_wq_cyc *wq)
{
	wq->cur_sz--;
}

static inline void mlx5_wq_cyc_update_db_record(struct mlx5_wq_cyc *wq)
{
	*wq->db = cpu_to_be32(wq->wqe_ctr);
}

static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
{
	return ctr & wq->fbc.sz_m1;
}

static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq)
{
	return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr);
}

static inline u16 mlx5_wq_cyc_get_tail(struct mlx5_wq_cyc *wq)
{
	return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr - wq->cur_sz);
}

static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
{
	return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
}

static inline u16 mlx5_wq_cyc_get_contig_wqebbs(struct mlx5_wq_cyc *wq, u16 ix)
{
	return mlx5_frag_buf_get_idx_last_contig_stride(&wq->fbc, ix) - ix + 1;
}

/* Returns true iff cc1 is strictly ahead of cc2 in 16-bit wraparound
 * (sequence-number) arithmetic; the sign bit of the 16-bit difference
 * tells which counter is behind.
 */
static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
{
	int equal   = (cc1 == cc2);
	int smaller = 0x8000 & (cc1 - cc2);

	return !equal && !smaller;
}

static inline u16 mlx5_wq_cyc_get_counter(struct mlx5_wq_cyc *wq)
{
	return wq->wqe_ctr;
}

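/*
 * Usage sketch (illustrative): a typical post/complete flow on a cyclic WQ
 * using the helpers above.  build_wqe() and complete_wqe() are hypothetical
 * placeholders for driver-specific steps; only the mlx5_wq_cyc_* calls are
 * real.
 *
 *	// producer: post one WQE
 *	if (mlx5_wq_cyc_is_full(wq))
 *		return -EBUSY;
 *	u16 pi = mlx5_wq_cyc_get_head(wq);
 *	void *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
 *	build_wqe(wqe);				// hypothetical
 *	mlx5_wq_cyc_push(wq);
 *	mlx5_wq_cyc_update_db_record(wq);	// then ring the HW doorbell
 *						// (driver-specific, with the
 *						// appropriate memory barriers)
 *
 *	// consumer: release the oldest outstanding WQE
 *	u16 ci = mlx5_wq_cyc_get_tail(wq);
 *	complete_wqe(mlx5_wq_cyc_get_wqe(wq, ci));	// hypothetical
 *	mlx5_wq_cyc_pop(wq);
 */
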
static inline u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
{
	return wq->fbc.sz_m1 + 1;
}

static inline u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq)
{
	return wq->fbc.log_stride;
}

static inline u32 mlx5_cqwq_ctr2ix(struct mlx5_cqwq *wq, u32 ctr)
{
	return ctr & wq->fbc.sz_m1;
}

static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
{
	return mlx5_cqwq_ctr2ix(wq, wq->cc);
}

static inline struct mlx5_cqe64 *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
{
	struct mlx5_cqe64 *cqe = mlx5_frag_buf_get_wqe(&wq->fbc, ix);

	/* For 128B CQEs the data is in the last 64B */
	cqe += wq->fbc.log_stride == 7;

	return cqe;
}

static inline u32 mlx5_cqwq_get_ctr_wrap_cnt(struct mlx5_cqwq *wq, u32 ctr)
{
	return ctr >> wq->fbc.log_sz;
}

static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
{
	return mlx5_cqwq_get_ctr_wrap_cnt(wq, wq->cc);
}

static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
{
	wq->cc++;
}

static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
{
	*wq->db = cpu_to_be32(wq->cc & 0xffffff);
}

static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
{
	u32 ci = mlx5_cqwq_get_ci(wq);
	struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
	u8 cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
	u8 sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;

	/* HW flips the CQE ownership bit on every pass through the CQ ring,
	 * so a CQE is valid only when its ownership bit matches the parity
	 * of the SW wrap count.
	 */
	if (cqe_ownership_bit != sw_ownership_val)
		return NULL;

	/* ensure cqe content is read after cqe ownership bit */
	dma_rmb();

	return cqe;
}

static inline
struct mlx5_cqe64 *mlx5_cqwq_get_cqe_enahnced_comp(struct mlx5_cqwq *wq)
{
	u8 sw_validity_iteration_count = mlx5_cqwq_get_wrap_cnt(wq) & 0xff;
	u32 ci = mlx5_cqwq_get_ci(wq);
	struct mlx5_cqe64 *cqe;

	cqe = mlx5_cqwq_get_wqe(wq, ci);
	if (cqe->validity_iteration_count != sw_validity_iteration_count)
		return NULL;

	/* ensure cqe content is read after cqe ownership bit/validity byte */
	dma_rmb();

	return cqe;
}

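/*
 * Usage sketch (illustrative): polling a CQ with the helpers above.  The
 * ownership/validity check is done inside mlx5_cqwq_get_cqe(), so the caller
 * loops until it returns NULL and then publishes the new consumer counter.
 * "cq" is a struct mlx5_cqwq * and handle_cqe() is a hypothetical placeholder
 * for driver-specific completion handling.
 *
 *	struct mlx5_cqe64 *cqe;
 *
 *	while ((cqe = mlx5_cqwq_get_cqe(cq))) {
 *		mlx5_cqwq_pop(cq);
 *		handle_cqe(cq, cqe);	// hypothetical
 *	}
 *	mlx5_cqwq_update_db_record(cq);
 */
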
static inline u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
{
	return (u32)wq->fbc.sz_m1 + 1;
}

static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
{
	return wq->cur_sz == wq->fbc.sz_m1;
}

static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
{
	return !wq->cur_sz;
}

static inline int mlx5_wq_ll_missing(struct mlx5_wq_ll *wq)
{
	return wq->fbc.sz_m1 - wq->cur_sz;
}

static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
{
	return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
}

static inline u16 mlx5_wq_ll_get_wqe_next_ix(struct mlx5_wq_ll *wq, u16 ix)
{
	struct mlx5_wqe_srq_next_seg *wqe = mlx5_wq_ll_get_wqe(wq, ix);

	return be16_to_cpu(wqe->next_wqe_index);
}

static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
{
	wq->head = head_next;
	wq->wqe_ctr++;
	wq->cur_sz++;
}

static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix,
				  __be16 *next_tail_next)
{
	*wq->tail_next = ix;
	wq->tail_next = next_tail_next;
	wq->cur_sz--;
}

static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq)
{
	*wq->db = cpu_to_be32(wq->wqe_ctr);
}

static inline u16 mlx5_wq_ll_get_head(struct mlx5_wq_ll *wq)
{
	return wq->head;
}

static inline u16 mlx5_wq_ll_get_counter(struct mlx5_wq_ll *wq)
{
	return wq->wqe_ctr;
}

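/*
 * Usage sketch (illustrative): the linked-list WQ strings WQEs together via a
 * next-index field, so posting follows the current head's next pointer and
 * completion re-links the freed entry onto the tail.  build_rx_wqe() is a
 * hypothetical placeholder; the next_wqe_index access assumes the WQE starts
 * with a struct mlx5_wqe_srq_next_seg, as in mlx5_wq_ll_get_wqe_next_ix()
 * above.
 *
 *	// producer
 *	if (mlx5_wq_ll_is_full(wq))
 *		return -EBUSY;
 *	u16 head = mlx5_wq_ll_get_head(wq);
 *	struct mlx5_wqe_srq_next_seg *wqe = mlx5_wq_ll_get_wqe(wq, head);
 *	build_rx_wqe(wqe);			// hypothetical
 *	mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next_wqe_index));
 *	mlx5_wq_ll_update_db_record(wq);
 *
 *	// consumer: return index "ix" to the free list
 *	struct mlx5_wqe_srq_next_seg *done = mlx5_wq_ll_get_wqe(wq, ix);
 *	mlx5_wq_ll_pop(wq, cpu_to_be16(ix), &done->next_wqe_index);
 */
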
#endif /* __MLX5_WQ_H__ */