1 /*-
2 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25
26 #ifndef MLX5_CORE_CQ_H
27 #define MLX5_CORE_CQ_H
28
29 #include <rdma/ib_verbs.h>
30 #include <dev/mlx5/driver.h>
31 #include <dev/mlx5/mlx5_ifc.h>
32
33 struct mlx5_eqe;
/*
 * In-kernel state for one completion queue (CQ).
 *
 * The doorbell-record pointers (set_ci_db / arm_db) are written by the
 * inline helpers below (mlx5_cq_set_ci / mlx5_cq_arm); the device reads
 * them to learn the consumer index and arm state.
 */
struct mlx5_core_cq {
	u32 cqn;		/* CQ number identifying this CQ to the HW */
	int cqe_sz;		/* CQE size in bytes (see cqe_sz_to_mlx_sz) */
	__be32 *set_ci_db;	/* doorbell record: consumer index (mlx5_cq_set_ci) */
	__be32 *arm_db;		/* doorbell record: arm request (mlx5_cq_arm) */
	unsigned vector;	/* completion vector index — assumed; set at create time */
	int irqn;		/* interrupt number delivering completions — assumed */
	void (*comp) (struct mlx5_core_cq *, struct mlx5_eqe *);	/* completion callback */
	void (*event) (struct mlx5_core_cq *, int);	/* async event callback (event type arg) */
	struct mlx5_uars_page *uar;	/* UAR page used for MMIO doorbells */
	u32 cons_index;		/* SW consumer index; low 24 bits are published to HW */
	unsigned arm_sn;	/* arm sequence number; low 2 bits go into the arm doorbell */
	struct mlx5_rsc_debug *dbg;	/* debug entry (mlx5_debug_cq_add/remove) */
	int pid;		/* pid of the creating process — presumably for debug; verify */
	int reset_notify_added;	/* nonzero once linked on a reset-notify list — assumed */
	struct list_head reset_notify;	/* linkage for reset notification — assumed */
};
51
52
/*
 * Error syndromes reported by the device in error CQEs
 * (MLX5_CQE_REQ_ERR / MLX5_CQE_RESP_ERR completions).
 */
enum {
	MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR		= 0x01,
	MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR		= 0x02,
	MLX5_CQE_SYNDROME_LOCAL_PROT_ERR		= 0x04,
	MLX5_CQE_SYNDROME_WR_FLUSH_ERR			= 0x05,
	MLX5_CQE_SYNDROME_MW_BIND_ERR			= 0x06,
	MLX5_CQE_SYNDROME_BAD_RESP_ERR			= 0x10,
	MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR		= 0x11,
	MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR		= 0x12,
	MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR		= 0x13,
	MLX5_CQE_SYNDROME_REMOTE_OP_ERR			= 0x14,
	MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR	= 0x15,
	MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR		= 0x16,
	MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR		= 0x22,
};
68
/*
 * CQE opcodes (completion types) plus the CQE ownership bit mask.
 * MLX5_CQE_OWNER_MASK selects the ownership bit used to detect
 * whether a CQE has been written by the device.
 */
enum {
	MLX5_CQE_OWNER_MASK	= 1,
	MLX5_CQE_REQ		= 0,	/* requester (send) completion */
	MLX5_CQE_RESP_WR_IMM	= 1,	/* responder: RDMA write with immediate */
	MLX5_CQE_RESP_SEND	= 2,	/* responder: send */
	MLX5_CQE_RESP_SEND_IMM	= 3,	/* responder: send with immediate */
	MLX5_CQE_RESP_SEND_INV	= 4,	/* responder: send with invalidate */
	MLX5_CQE_RESIZE_CQ	= 5,	/* CQ resize marker */
	MLX5_CQE_SIG_ERR	= 12,	/* signature/integrity error */
	MLX5_CQE_REQ_ERR	= 13,	/* requester error; see syndromes above */
	MLX5_CQE_RESP_ERR	= 14,	/* responder error; see syndromes above */
	MLX5_CQE_INVALID	= 15,	/* no valid CQE */
};
82
/*
 * Field-select bits for MODIFY_CQ, indicating which CQ context
 * fields are being changed (see mlx5_core_modify_cq_by_mask).
 */
enum {
	MLX5_CQ_MODIFY_PERIOD		= 1 << 0,
	MLX5_CQ_MODIFY_COUNT		= 1 << 1,
	MLX5_CQ_MODIFY_OVERRUN		= 1 << 2,
	MLX5_CQ_MODIFY_EQN		= 1 << 3,
	MLX5_CQ_MODIFY_PERIOD_MODE	= 1 << 4,
};
90
/*
 * MODIFY_CQ opmod value for resize, and the field-select mask bits
 * used with the resize operation.
 */
enum {
	MLX5_CQ_OPMOD_RESIZE		= 1,
	MLX5_MODIFY_CQ_MASK_LOG_SIZE	= 1 << 0,
	MLX5_MODIFY_CQ_MASK_PG_OFFSET	= 1 << 1,
	MLX5_MODIFY_CQ_MASK_PG_SIZE	= 1 << 2,
};
97
/*
 * Parameters for a MODIFY_CQ operation; `type` selects which member
 * of the union is meaningful.  Only `resize` currently carries data;
 * `moder` and `mapping` are placeholders — presumably reserved for
 * future moderation/page-mapping parameters (verify against callers).
 */
struct mlx5_cq_modify_params {
	int type;
	union {
		struct {
			u32 page_offset;	/* offset of CQ buffer within first page */
			u8 log_cq_size;		/* log2 of the new CQ size in entries */
		} resize;

		struct {
		} moder;

		struct {
		} mapping;
	} params;
};
113
/*
 * CQE stride (compression/size) encodings for the CQ context:
 * 64-byte, 128-byte, and 128-byte with padding.
 */
enum {
	CQE_STRIDE_64		= 0,
	CQE_STRIDE_128		= 1,
	CQE_STRIDE_128_PAD	= 2,
};
119
/*
 * Convert a CQE size in bytes to the device encoding used in the CQ
 * context (CQE_SIZE_64 / CQE_SIZE_128).  Any size other than 64 is
 * encoded as 128 bytes.
 */
static inline int cqe_sz_to_mlx_sz(u8 size)
{
	if (size == 64)
		return CQE_SIZE_64;

	return CQE_SIZE_128;
}
124
mlx5_cq_set_ci(struct mlx5_core_cq * cq)125 static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
126 {
127 *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
128 }
129
/*
 * Arm commands for mlx5_cq_arm(): request notification on the next
 * solicited-only CQE, or on any next CQE.  The command occupies the
 * bits above the 24-bit consumer index in the doorbell word.
 */
enum {
	MLX5_CQ_DB_REQ_NOT_SOL		= 1 << 24,
	MLX5_CQ_DB_REQ_NOT		= 0 << 24
};
134
/*
 * Arm the CQ so the device raises a completion event, per `cmd`
 * (MLX5_CQ_DB_REQ_NOT / MLX5_CQ_DB_REQ_NOT_SOL).
 *
 * The arm value — 2-bit sequence number, command, and low 24 bits of
 * the consumer index — is first stored in the arm doorbell record in
 * host memory, then rung via the MMIO CQ doorbell on the UAR page.
 */
static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
			       void __iomem *uar_page,
			       spinlock_t *doorbell_lock,
			       u32 cons_index)
{
	const u32 seq = cq->arm_sn & 3;
	const u32 index = cons_index & 0xffffff;
	const u32 arm_val = (seq << 28) | cmd | index;
	__be32 db_data[2];

	/* Publish the arm request in the doorbell record. */
	*cq->arm_db = cpu_to_be32(arm_val);

	/* Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	db_data[0] = cpu_to_be32(arm_val);
	db_data[1] = cpu_to_be32(cq->cqn);

	mlx5_write64(db_data, uar_page + MLX5_CQ_DOORBELL, doorbell_lock);
}
159
/* CQ table lifecycle: set up / tear down the per-device CQ table. */
int mlx5_init_cq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
/* Create/destroy a CQ; in/out are raw FW command buffers of the given lengths. */
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
/* Query CQ context from FW into the caller-provided out buffer. */
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
		       u32 *out, int outlen);
/* Generic MODIFY_CQ with a caller-built command buffer. */
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen);
/* Convenience wrappers to change event moderation (period/count[/mode]). */
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
				   struct mlx5_core_cq *cq, u16 cq_period,
				   u16 cq_max_count);
int mlx5_core_modify_cq_moderation_mode(struct mlx5_core_dev *dev,
					struct mlx5_core_cq *cq,
					u16 cq_period,
					u16 cq_max_count,
					u8 cq_mode);
/* MODIFY_CQ selecting fields via the MLX5_CQ_MODIFY_* mask bits above. */
int mlx5_core_modify_cq_by_mask(struct mlx5_core_dev *,
				struct mlx5_core_cq *, u32 mask,
				u16 cq_period, u16 cq_max_count,
				u8 cq_mode, u8 cq_eqn);
/* Register/unregister the CQ with the driver's debug facility. */
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
183
184 #endif /* MLX5_CORE_CQ_H */
185