/*-
 * Copyright (c) 2013-2020, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/netdevice.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <linux/types.h>
#include <dev/mlx5/mlx5_core/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>

#define mlx5_ib_dbg(dev, format, arg...) \
	pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
	    __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...) \
	pr_err("%s: ERR: %s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
	    __LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...) \
	pr_warn("%s: WARN: %s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
	    __LINE__, current->pid, ##arg)
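
/*
 * Usage sketch with hypothetical values: a call such as
 *
 *	mlx5_ib_warn(dev, "modify QP 0x%x failed, err %d\n", qpn, err);
 *
 * expands to pr_warn() with the message prefixed by the IB device name,
 * function, line and caller pid, e.g.
 * "mlx5_0: WARN: mlx5_ib_modify_qp:1234:(pid 567): modify QP ...".
 */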

#define field_avail(type, fld, sz) (offsetof(type, fld) + \
	sizeof(((type *)0)->fld) <= (sz))
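
/*
 * field_avail() guards reads of optional trailing fields in user commands
 * that may come from an older, shorter ABI: it evaluates to true only when
 * the whole field lies within the sz bytes actually provided. A minimal
 * sketch (see get_qp_user_index() below for a real use):
 *
 *	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen))
 *		uidx = ucmd->uidx;	// the field fits in what was passed
 */
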
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)

enum {
	MLX5_IB_MMAP_CMD_SHIFT = 8,
	MLX5_IB_MMAP_CMD_MASK = 0xff,
};
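
/*
 * The mmap command is carried in the upper bits of the mmap offset's page
 * index. A sketch of the usual decode, assuming the page offset from user
 * space is in vma->vm_pgoff:
 *
 *	cmd = (vma->vm_pgoff >> MLX5_IB_MMAP_CMD_SHIFT) &
 *	    MLX5_IB_MMAP_CMD_MASK;
 */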

enum {
	MLX5_RES_SCAT_DATA32_CQE = 0x1,
	MLX5_RES_SCAT_DATA64_CQE = 0x2,
	MLX5_REQ_SCAT_DATA32_CQE = 0x11,
	MLX5_REQ_SCAT_DATA64_CQE = 0x22,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY = 1,
	MLX5_MAD_IFC_IGNORE_BKEY = 2,
	MLX5_MAD_IFC_NET_VIEW = 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG = 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX = BIT(31),
	MLX5_IB_INVALID_BFREG = BIT(31),
};

enum mlx5_ib_mmap_type {
	MLX5_IB_MMAP_TYPE_MEMIC = 1,
	MLX5_IB_MMAP_TYPE_VAR = 2,
	MLX5_IB_MMAP_TYPE_UAR_WC = 3,
	MLX5_IB_MMAP_TYPE_UAR_NC = 4,
};

struct mlx5_bfreg_info {
	u32 *sys_pages;
	int num_low_latency_bfregs;
	unsigned int *count;

	/*
	 * protect bfreg allocation data structs
	 */
	struct mutex lock;
	u32 ver;
	u8 lib_uar_4k : 1;
	u8 lib_uar_dyn : 1;
	u32 num_sys_pages;
	u32 num_static_sys_pages;
	u32 total_num_bfregs;
	u32 num_dyn_bfregs;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext ibucontext;
	struct list_head db_page_list;

	/* protect doorbell record alloc/free */
	struct mutex db_page_mutex;
	struct mlx5_bfreg_info bfregi;
	u8 cqe_version;
	/* Transport Domain number */
	u32 tdn;

	u64 lib_caps;
	u16 devx_uid;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd ibpd;
	u32 pdn;
	u16 uid;
};

#define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO (MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO (MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS 2
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table *flow_table;
	unsigned int refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head list;
	struct ib_flow ibflow;
	struct mlx5_ib_flow_prio *prio;
	struct mlx5_flow_rule *rule;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_flow_table *lag_demux_ft;
	/*
	 * Protect the flow steering bypass flow tables when flow rules are
	 * added or deleted; only a single add/removal of a flow steering
	 * rule may be in progress at a time.
	 */
	struct mutex lock;
};

/*
 * Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for the low-level driver.
 */

#define MLX5_IB_SEND_UMR_UNREG IB_SEND_RESERVED_START
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)

#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_PD (IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_ACCESS IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR IB_WR_RESERVED1

/*
 * Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */
#define MLX5_IB_QP_CREATE_SQPN_QP1 IB_QP_CREATE_RESERVED_START
#define MLX5_IB_QP_CREATE_WC_TEST (IB_QP_CREATE_RESERVED_START << 1)

struct wr_list {
	u16 opcode;
	u16 next;
};

struct mlx5_ib_wq {
	u64 *wrid;
	u32 *wr_data;
	struct wr_list *w_list;
	unsigned *wqe_head;
	u16 unsig_count;

	/* serialize post to the work queue */
	spinlock_t lock;
	int wqe_cnt;
	int max_post;
	int max_gs;
	int offset;
	int wqe_shift;
	unsigned head;
	unsigned tail;
	u16 cur_post;
	u16 last_poll;
	void *qend;
};

struct mlx5_ib_rwq {
	struct ib_wq ibwq;
	struct mlx5_core_qp core_qp;
	u32 rq_num_pas;
	u32 log_rq_stride;
	u32 log_rq_size;
	u32 rq_page_offset;
	u32 log_page_size;
	struct ib_umem *umem;
	size_t buf_size;
	unsigned int page_shift;
	int create_type;
	struct mlx5_db db;
	u32 user_index;
	u32 wqe_count;
	u32 wqe_shift;
	int wq_sig;
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

enum {
	MLX5_WQ_USER,
	MLX5_WQ_KERNEL
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32 rqtn;
	u16 uid;
};

/*
 * Connect-IB can trigger up to four concurrent pagefaults
 * per-QP.
 */
enum mlx5_ib_pagefault_context {
	MLX5_IB_PAGEFAULT_RESPONDER_READ,
	MLX5_IB_PAGEFAULT_REQUESTOR_READ,
	MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
	MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
	MLX5_IB_PAGEFAULT_CONTEXTS
};

static inline enum mlx5_ib_pagefault_context
mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
{
	return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
}
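
/*
 * The masked flags map directly onto enum mlx5_ib_pagefault_context,
 * assuming (as in the mlx5 core definitions) that MLX5_PFAULT_REQUESTOR is
 * bit 0 and MLX5_PFAULT_WRITE is bit 1: responder/read = 0,
 * requestor/read = 1, responder/write = 2, requestor/write = 3.
 */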

struct mlx5_ib_pfault {
	struct work_struct work;
	struct mlx5_pagefault mpfault;
};

struct mlx5_ib_ubuffer {
	struct ib_umem *umem;
	int buf_size;
	u64 buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp *container_mibqp;
	struct mlx5_core_qp mqp;
	struct mlx5_ib_ubuffer ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base base;
	u16 xrcdn;
	u8 alt_port;
	u8 atomic_rd_en;
	u8 resp_depth;
};

struct mlx5_ib_rss_qp {
	u32 tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq *rq;
	struct mlx5_ib_ubuffer ubuffer;
	struct mlx5_db *doorbell;
	u32 tirn;
	u8 state;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq *sq;
	struct mlx5_ib_ubuffer ubuffer;
	struct mlx5_db *doorbell;
	u32 tisn;
	u8 state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	int buf_size;
	unsigned long offset;
	struct mlx5_sq_bfreg *bfreg;
	spinlock_t lock32;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct mdct;
	u32 *in;
};

struct mlx5_ib_qp {
	struct ib_qp ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
	};
	struct mlx5_buf buf;

	struct mlx5_db db;
	struct mlx5_ib_wq rq;

	u8 sq_signal_bits;
	u8 fm_cache;
	struct mlx5_ib_wq sq;

	/* serialize qp state modifications */
	struct mutex mutex;
	u32 flags;
	u8 port;
	u8 state;
	int wq_sig;
	int scat_cqe;
	int max_inline_data;
	struct mlx5_bf bf;
	int has_rq;

	/*
	 * Only for user-space QPs; for kernel QPs we have it from the bf
	 * object.
	 */
	int bfregn;

	int create_type;

	/* Store signature errors */
	bool signature_en;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * A flag that is true for QPs that are in a state that doesn't
	 * allow page faults, and that shouldn't schedule any more faults.
	 */
	int disable_page_faults;
	/*
	 * The disable_page_faults_lock protects a QP's disable_page_faults
	 * field, allowing a thread to atomically check whether the QP
	 * allows page faults and, if so, schedule a page fault.
	 */
	spinlock_t disable_page_faults_lock;
	struct mlx5_ib_pfault pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
#endif
	struct list_head qps_list;
	struct list_head cq_recv_list;
	struct list_head cq_send_list;
};

struct mlx5_ib_cq_buf {
	struct mlx5_buf buf;
	struct ib_umem *umem;
	int cqe_size;
	int nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL = IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND = IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV = IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 5,
	/* The QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1 = 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7,
	MLX5_IB_QP_RSS = 1 << 8,
	MLX5_IB_QP_UNDERLAY = 1 << 10,
};

struct mlx5_umr_wr {
	struct ib_send_wr wr;
	union {
		u64 virt_addr;
		u64 offset;
	} target;
	struct ib_pd *pd;
	unsigned int page_shift;
	unsigned int npages;
	u32 length;
	int access_flags;
	u32 mkey;
};

static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}

struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem *umem;
};

struct mlx5_ib_cq {
	struct ib_cq ibcq;
	struct mlx5_core_cq mcq;
	struct mlx5_ib_cq_buf buf;
	struct mlx5_db db;

	/* serialize access to the CQ */
	spinlock_t lock;

	/* protect resizing the CQ */
	struct mutex resize_mutex;
	struct mlx5_ib_cq_buf *resize_buf;
	struct ib_umem *resize_umem;
	int cqe_size;
	struct list_head list_send_qp;
	struct list_head list_recv_qp;
	u32 create_flags;
	struct list_head wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct notify_work;
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq ibsrq;
	struct mlx5_core_srq msrq;
	struct mlx5_buf buf;
	struct mlx5_db db;
	u64 *wrid;
	/* protect SRQ handling */
	spinlock_t lock;
	int head;
	int tail;
	u16 wqe_ctr;
	struct ib_umem *umem;
	/* serialize arming an SRQ */
	struct mutex mutex;
	int wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd ibxrcd;
	u32 xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

struct mlx5_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u8 mmap_flag;
	u64 address;
	u32 page_idx;
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

struct mlx5_ib_mr {
	struct ib_mr ibmr;
	void *descs;
	dma_addr_t desc_map;
	int ndescs;
	int max_descs;
	int desc_size;
	int access_mode;
	struct mlx5_core_mkey mmkey;
	struct ib_umem *umem;
	struct mlx5_shared_mr_info *smr_info;
	struct list_head list;
	int order;
	int umred;
	int npages;
	struct mlx5_ib_dev *dev;
	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx *sig;
	int live;
	void *descs_alloc;
	int access_flags; /* Needed for rereg MR */
	struct mlx5_async_work cb_work;
};

struct mlx5_ib_mw {
	struct ib_mw ibmw;
	struct mlx5_core_mkey mmkey;
};

struct mlx5_ib_devx_mr {
	struct mlx5_core_mkey mmkey;
	int ndescs;
};

struct mlx5_ib_umr_context {
	struct ib_cqe cqe;
	enum ib_wc_status status;
	struct completion done;
};

struct umr_common {
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	/* control access to the UMR QP */
	struct semaphore sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head head;
	/* synchronize access to the cache entry */
	spinlock_t lock;

	char name[4];
	u32 order;
	u32 size;
	u32 cur;
	u32 miss;
	u32 limit;

	struct mlx5_ib_dev *dev;
	struct work_struct work;
	struct delayed_work dwork;
	int pending;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
	int stopped;
	unsigned long last_add;
};

struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq *c0;
	struct ib_xrcd *x0;
	struct ib_xrcd *x1;
	struct ib_pd *p0;
	struct ib_srq *s0;
	struct ib_srq *s1;
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex mutex;
};

struct mlx5_ib_port {
	u16 q_cnt_id;
};

struct mlx5_roce {
	/*
	 * Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer.
	 */
	rwlock_t netdev_lock;
	if_t netdev;
	struct notifier_block nb;
	atomic_t next_port;
};

#define MLX5_IB_STATS_COUNT(a,...) a
#define MLX5_IB_STATS_VAR(a,b,c,...) b c;
#define MLX5_IB_STATS_DESC(a,b,c,d,e,...) d, e,
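
/*
 * The lists below are X-macros: each invokes its argument m() once per
 * field, so one list generates the counter count, the struct members and
 * the sysctl name/description pairs. For example,
 *
 *	MLX5_IB_CONG_PARAMS(MLX5_IB_STATS_VAR)
 *
 * expands to "u64 rp_clamp_tgt_rate; u64 rp_clamp_tgt_rate_ati; ...",
 * while (0 MLX5_IB_CONG_PARAMS(MLX5_IB_STATS_COUNT)) expands to
 * "(0 +1 +1 ...)" and evaluates to the number of parameters.
 */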

#define MLX5_IB_CONG_PARAMS(m) \
/* ECN RP */ \
m(+1, u64, rp_clamp_tgt_rate, "rp_clamp_tgt_rate", "If set, whenever a CNP is processed, the target rate is updated to be the current rate") \
m(+1, u64, rp_clamp_tgt_rate_ati, "rp_clamp_tgt_rate_ati", "If set, when receiving a CNP, the target rate should be updated if the transmission rate was increased due to the timer, and not only due to the byte counter") \
m(+1, u64, rp_time_reset, "rp_time_reset", "Time in microseconds between rate increases if no CNPs are received") \
m(+1, u64, rp_byte_reset, "rp_byte_reset", "Transmitted data in bytes between rate increases if no CNPs are received. A value of zero means disabled.") \
m(+1, u64, rp_threshold, "rp_threshold", "The number of times rpByteStage or rpTimeStage can count before the RP rate control state machine advances states") \
m(+1, u64, rp_ai_rate, "rp_ai_rate", "The rate, in Mbits per second, used to increase rpTargetRate in the active increase state") \
m(+1, u64, rp_hai_rate, "rp_hai_rate", "The rate, in Mbits per second, used to increase rpTargetRate in the hyper increase state") \
m(+1, u64, rp_min_dec_fac, "rp_min_dec_fac", "The minimum factor by which the current transmit rate can be changed when processing a CNP. Value is given as a percentage, [1 .. 100]") \
m(+1, u64, rp_min_rate, "rp_min_rate", "The minimum rate, in Mbits per second, to limit the flow to") \
m(+1, u64, rp_rate_to_set_on_first_cnp, "rp_rate_to_set_on_first_cnp", "The rate that is set for the flow when a rate limiter is allocated to it upon first CNP received, in Mbits per second. A value of zero means use full port speed") \
m(+1, u64, rp_dce_tcp_g, "rp_dce_tcp_g", "Used to update the congestion estimator, alpha, once every dce_tcp_rtt microseconds") \
m(+1, u64, rp_dce_tcp_rtt, "rp_dce_tcp_rtt", "The time between updates of the alpha value, in microseconds") \
m(+1, u64, rp_rate_reduce_monitor_period, "rp_rate_reduce_monitor_period", "The minimum time between two consecutive rate reductions for a single flow") \
m(+1, u64, rp_initial_alpha_value, "rp_initial_alpha_value", "The initial value of alpha to use when receiving the first CNP for a flow") \
m(+1, u64, rp_gd, "rp_gd", "If a CNP is received, the flow rate is reduced at the beginning of the next rate_reduce_monitor_period interval") \
/* ECN NP */ \
m(+1, u64, np_cnp_dscp, "np_cnp_dscp", "The DiffServ Code Point of the generated CNP for this port") \
m(+1, u64, np_cnp_prio_mode, "np_cnp_prio_mode", "The 802.1p priority value of the generated CNP for this port") \
m(+1, u64, np_cnp_prio, "np_cnp_prio", "The 802.1p priority value of the generated CNP for this port")

#define MLX5_IB_CONG_PARAMS_NUM (0 MLX5_IB_CONG_PARAMS(MLX5_IB_STATS_COUNT))

#define MLX5_IB_CONG_STATS(m) \
m(+1, u64, syndrome, "syndrome", "Syndrome number") \
m(+1, u64, rp_cur_flows, "rp_cur_flows", "Number of flows limited") \
m(+1, u64, sum_flows, "sum_flows", "Sum of the number of flows limited over time") \
m(+1, u64, rp_cnp_ignored, "rp_cnp_ignored", "Number of CNPs and CNMs ignored") \
m(+1, u64, rp_cnp_handled, "rp_cnp_handled", "Number of CNPs and CNMs successfully handled") \
m(+1, u64, time_stamp, "time_stamp", "Time stamp in microseconds") \
m(+1, u64, accumulators_period, "accumulators_period", "The value of X variable for accumulating counters") \
m(+1, u64, np_ecn_marked_roce_packets, "np_ecn_marked_roce_packets", "Number of ECN marked packets seen") \
m(+1, u64, np_cnp_sent, "np_cnp_sent", "Number of CNPs sent")

#define MLX5_IB_CONG_STATS_NUM (0 MLX5_IB_CONG_STATS(MLX5_IB_STATS_COUNT))

#define MLX5_IB_CONG_STATUS(m) \
/* ECN RP */ \
m(+1, u64, rp_0_enable, "rp_0_enable", "Enable reaction point, priority 0", MLX5_IB_RROCE_ECN_RP, 0, enable) \
m(+1, u64, rp_1_enable, "rp_1_enable", "Enable reaction point, priority 1", MLX5_IB_RROCE_ECN_RP, 1, enable) \
m(+1, u64, rp_2_enable, "rp_2_enable", "Enable reaction point, priority 2", MLX5_IB_RROCE_ECN_RP, 2, enable) \
m(+1, u64, rp_3_enable, "rp_3_enable", "Enable reaction point, priority 3", MLX5_IB_RROCE_ECN_RP, 3, enable) \
m(+1, u64, rp_4_enable, "rp_4_enable", "Enable reaction point, priority 4", MLX5_IB_RROCE_ECN_RP, 4, enable) \
m(+1, u64, rp_5_enable, "rp_5_enable", "Enable reaction point, priority 5", MLX5_IB_RROCE_ECN_RP, 5, enable) \
m(+1, u64, rp_6_enable, "rp_6_enable", "Enable reaction point, priority 6", MLX5_IB_RROCE_ECN_RP, 6, enable) \
m(+1, u64, rp_7_enable, "rp_7_enable", "Enable reaction point, priority 7", MLX5_IB_RROCE_ECN_RP, 7, enable) \
m(+1, u64, rp_8_enable, "rp_8_enable", "Enable reaction point, priority 8", MLX5_IB_RROCE_ECN_RP, 8, enable) \
m(+1, u64, rp_9_enable, "rp_9_enable", "Enable reaction point, priority 9", MLX5_IB_RROCE_ECN_RP, 9, enable) \
m(+1, u64, rp_10_enable, "rp_10_enable", "Enable reaction point, priority 10", MLX5_IB_RROCE_ECN_RP, 10, enable) \
m(+1, u64, rp_11_enable, "rp_11_enable", "Enable reaction point, priority 11", MLX5_IB_RROCE_ECN_RP, 11, enable) \
m(+1, u64, rp_12_enable, "rp_12_enable", "Enable reaction point, priority 12", MLX5_IB_RROCE_ECN_RP, 12, enable) \
m(+1, u64, rp_13_enable, "rp_13_enable", "Enable reaction point, priority 13", MLX5_IB_RROCE_ECN_RP, 13, enable) \
m(+1, u64, rp_14_enable, "rp_14_enable", "Enable reaction point, priority 14", MLX5_IB_RROCE_ECN_RP, 14, enable) \
m(+1, u64, rp_15_enable, "rp_15_enable", "Enable reaction point, priority 15", MLX5_IB_RROCE_ECN_RP, 15, enable) \
/* ECN NP */ \
m(+1, u64, np_0_enable, "np_0_enable", "Enable notification point, priority 0", MLX5_IB_RROCE_ECN_NP, 0, enable) \
m(+1, u64, np_1_enable, "np_1_enable", "Enable notification point, priority 1", MLX5_IB_RROCE_ECN_NP, 1, enable) \
m(+1, u64, np_2_enable, "np_2_enable", "Enable notification point, priority 2", MLX5_IB_RROCE_ECN_NP, 2, enable) \
m(+1, u64, np_3_enable, "np_3_enable", "Enable notification point, priority 3", MLX5_IB_RROCE_ECN_NP, 3, enable) \
m(+1, u64, np_4_enable, "np_4_enable", "Enable notification point, priority 4", MLX5_IB_RROCE_ECN_NP, 4, enable) \
m(+1, u64, np_5_enable, "np_5_enable", "Enable notification point, priority 5", MLX5_IB_RROCE_ECN_NP, 5, enable) \
m(+1, u64, np_6_enable, "np_6_enable", "Enable notification point, priority 6", MLX5_IB_RROCE_ECN_NP, 6, enable) \
m(+1, u64, np_7_enable, "np_7_enable", "Enable notification point, priority 7", MLX5_IB_RROCE_ECN_NP, 7, enable) \
m(+1, u64, np_8_enable, "np_8_enable", "Enable notification point, priority 8", MLX5_IB_RROCE_ECN_NP, 8, enable) \
m(+1, u64, np_9_enable, "np_9_enable", "Enable notification point, priority 9", MLX5_IB_RROCE_ECN_NP, 9, enable) \
m(+1, u64, np_10_enable, "np_10_enable", "Enable notification point, priority 10", MLX5_IB_RROCE_ECN_NP, 10, enable) \
m(+1, u64, np_11_enable, "np_11_enable", "Enable notification point, priority 11", MLX5_IB_RROCE_ECN_NP, 11, enable) \
m(+1, u64, np_12_enable, "np_12_enable", "Enable notification point, priority 12", MLX5_IB_RROCE_ECN_NP, 12, enable) \
m(+1, u64, np_13_enable, "np_13_enable", "Enable notification point, priority 13", MLX5_IB_RROCE_ECN_NP, 13, enable) \
m(+1, u64, np_14_enable, "np_14_enable", "Enable notification point, priority 14", MLX5_IB_RROCE_ECN_NP, 14, enable) \
m(+1, u64, np_15_enable, "np_15_enable", "Enable notification point, priority 15", MLX5_IB_RROCE_ECN_NP, 15, enable)

#define MLX5_IB_CONG_STATUS_NUM (0 MLX5_IB_CONG_STATUS(MLX5_IB_STATS_COUNT))

struct mlx5_ib_congestion {
	struct sysctl_ctx_list ctx;
	struct sx lock;
	struct delayed_work dwork;
	union {
		u64 arg[1];
		struct {
			MLX5_IB_CONG_PARAMS(MLX5_IB_STATS_VAR)
			MLX5_IB_CONG_STATS(MLX5_IB_STATS_VAR)
			MLX5_IB_CONG_STATUS(MLX5_IB_STATS_VAR)
		};
	};
};

struct mlx5_devx_event_table {
	/* serialize updating the event_xa */
	struct mutex event_xa_lock;
	struct xarray event_xa;
};

struct mlx5_ib_dev {
	struct ib_device ib_dev;
	struct mlx5_core_dev *mdev;
	struct mlx5_roce roce;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
	int num_ports;
	/* serialize update of capability mask */
	struct mutex cap_mask_mutex;
	u8 ib_active:1;
	u8 wc_support:1;
	struct umr_common umrc;
	/* sync used page count stats */
	struct mlx5_ib_resources devr;
	struct mlx5_mr_cache cache;
	struct timer_list delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex slow_path_mutex;
	int fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps odp_caps;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct mr_srcu;
#endif
	struct mlx5_ib_flow_db flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t reset_flow_resource_lock;
	struct list_head qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port *port;
	struct mlx5_sq_bfreg bfreg;
	struct mlx5_sq_bfreg wc_bfreg;
	struct mlx5_sq_bfreg fp_bfreg;
	struct mlx5_devx_event_table devx_event_table;
	struct mlx5_ib_congestion congestion;

	struct mlx5_async_ctx async_ctx;

	/* protect the user_td */
	struct mutex lb_mutex;
	u32 user_td;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
	    udata, struct mlx5_ib_ucontext, ibucontext);

	return to_mdev(context->ibucontext.device);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

struct mlx5_ib_ah {
	struct ib_ah ibah;
	struct mlx5_av av;
};

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}

static inline struct mlx5_user_mmap_entry *
to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry,
	    struct mlx5_user_mmap_entry, rdma_entry);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
    struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
    u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
    const void *in_mad, void *response_mad);
int mlx5_ib_create_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr, u32 flags,
    struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags);
int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
    struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
    const struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
    struct ib_qp_init_attr *init_attr, struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
    int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
    int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
    const struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
    const struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
    void *buffer, u32 length, struct mlx5_ib_qp_base *base);
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
    struct ib_udata *udata);
void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    u64 virt_addr, int access_flags, struct ib_udata *udata);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
    struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
    int npages, int zap);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
    u64 length, u64 virt_addr, int access_flags,
    struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
    u32 max_num_sg, struct ib_udata *udata);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
    unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
    const struct ib_mad_hdr *in, size_t in_mad_size,
    struct ib_mad_hdr *out, size_t *out_mad_size,
    u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
    struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
    struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
    __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev, u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev, u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
    struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
    unsigned long max_page_shift, int *count, int *shift,
    int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
    int page_shift, size_t offset, size_t num_pages,
    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
    int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
    struct ib_wq_init_attr *init_attr, struct ib_udata *udata);
void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
    u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
    struct ib_rwq_ind_table_init_attr *init_attr, struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
extern struct workqueue_struct *mlx5_ib_page_fault_wq;

void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
    struct mlx5_ib_pfault *pfault);
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
    unsigned long end);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
}

static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
    u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
    u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
    u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
    u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
    int index);
int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
    int index, enum ib_gid_type *gid_type);

/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
    int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
    int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
    const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
    const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
    int bfregn);

#if 1 /* IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) */
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev);
void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id);
#else
static inline int
mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user) { return -EOPNOTSUPP; }
static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
static inline void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev) {}
static inline void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev) {}
static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
    int *dest_type)
{
	return false;
}
#endif

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
	    (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
	    (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
	    (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
	    MLX5_PERM_LOCAL_READ;
}

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Return a non-zero value for unsupported CQ create flags,
	 * zero otherwise.
	 */
	return (flags & ~(IB_CQ_FLAGS_IGNORE_OVERRUN |
	    IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
    u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
    struct mlx5_ib_create_qp *ucmd,
    int inlen,
    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	/* A new-format command carrying the default uidx is accepted with CQE v0. */
	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	/* Otherwise the presence of the uidx field must match the CQE version. */
	if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
	    !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
    struct mlx5_ib_create_srq *ucmd,
    int inlen,
    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
	    !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

void mlx5_ib_cleanup_congestion(struct mlx5_ib_dev *);
int mlx5_ib_init_congestion(struct mlx5_ib_dev *);

static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
	    MLX5_UARS_IN_PAGE : 1;
}

static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
    struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
	    bfregi->num_static_sys_pages;
}

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
    struct mlx5_bfreg_info *bfregi, u32 bfregn,
    bool dyn_bfreg);

#endif /* MLX5_IB_H */