xref: /freebsd/sys/dev/mlx5/mlx5_core/mlx5_cq.c (revision 148a8da8)
/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <dev/mlx5/driver.h>
#include <rdma/ib_verbs.h>
#include <dev/mlx5/cq.h>
#include "mlx5_core.h"

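/*
 * Deliver a completion event to the CQ's completion handler.  CQNs
 * below MLX5_CQ_LINEAR_ARRAY_SIZE are resolved through the locked
 * linear array fast path; all others go through a refcounted lookup
 * in the radix tree.
 */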
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
{
	struct mlx5_core_cq *cq;
	struct mlx5_cq_table *table = &dev->priv.cq_table;

	if (cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) {
		struct mlx5_cq_linear_array_entry *entry;

		entry = &table->linear_array[cqn];
		spin_lock(&entry->lock);
		cq = entry->cq;
		if (cq == NULL) {
			mlx5_core_warn(dev,
			    "Completion event for bogus CQ 0x%x\n", cqn);
		} else {
			++cq->arm_sn;
			cq->comp(cq);
		}
		spin_unlock(&entry->lock);
		return;
	}

	spin_lock(&table->lock);
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		atomic_inc(&cq->refcount);
	spin_unlock(&table->lock);

	if (!cq) {
		mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->comp(cq);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

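/*
 * Deliver an asynchronous event (e.g. a CQ error) to the CQ's event
 * handler, holding a reference across the callback so the CQ cannot
 * be freed while the handler runs.
 */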
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_core_cq *cq;

	spin_lock(&table->lock);

	cq = radix_tree_lookup(&table->tree, cqn);
	if (cq)
		atomic_inc(&cq->refcount);

	spin_unlock(&table->lock);

	if (!cq) {
		mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn);
		return;
	}

	cq->event(cq, event_type);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

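/*
 * Create a CQ through the CREATE_CQ firmware command and make it
 * reachable via the radix tree (and the linear array for low CQNs).
 * If registration fails, the just-created firmware object is
 * destroyed again before returning the error.
 */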
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	u32 out[MLX5_ST_SZ_DW(create_cq_out)] = {0};
	u32 din[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
	u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
	int err;

	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	cq->cqn = MLX5_GET(create_cq_out, out, cqn);
	cq->cons_index = 0;
	cq->arm_sn     = 0;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock_irq(&table->lock);
	if (err)
		goto err_cmd;

	if (cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) {
		struct mlx5_cq_linear_array_entry *entry;

		entry = &table->linear_array[cq->cqn];
		spin_lock_irq(&entry->lock);
		entry->cq = cq;
		spin_unlock_irq(&entry->lock);
	}

	cq->pid = curthread->td_proc->p_pid;

	return 0;

err_cmd:
	MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
	MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
	return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);

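/*
 * Remove the CQ from the lookup structures, destroy the firmware
 * object, and wait for concurrent event handlers to drop their
 * references before returning to the caller.
 */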
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
	struct mlx5_core_cq *tmp;
	int err;

	if (cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) {
		struct mlx5_cq_linear_array_entry *entry;

		entry = &table->linear_array[cq->cqn];
		spin_lock_irq(&entry->lock);
		entry->cq = NULL;
		spin_unlock_irq(&entry->lock);
	}

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock_irq(&table->lock);
	if (!tmp) {
		mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
		return -EINVAL;
	}
	if (tmp != cq) {
		mlx5_core_warn(dev, "corruption on cqn 0x%x\n", cq->cqn);
		return -EINVAL;
	}

	MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
	MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		goto out;

	synchronize_irq(cq->irqn);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

out:
	return err;
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);

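/*
 * Read back the CQ context from firmware via the QUERY_CQ command;
 * the caller provides the output mailbox.
 */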
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};

	MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
	MLX5_SET(query_cq_in, in, cqn, cq->cqn);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_cq);

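/*
 * Execute a MODIFY_CQ command on a caller-built input mailbox; only
 * the opcode is filled in here, so the caller must already have set
 * the CQN and the modify field select.
 */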
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};

	MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_cq);

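/*
 * Update the CQ's event moderation parameters.  cq_period and
 * cq_max_count bound, respectively, how long and how many
 * completions the hardware may coalesce before generating a
 * completion event.
 */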
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
				   struct mlx5_core_cq *cq,
				   u16 cq_period,
				   u16 cq_max_count)
{
	u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
	void *cqc;

	MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
	MLX5_SET(cqc, cqc, cq_period, cq_period);
	MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.modify_field_select.modify_field_select,
		 MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);

	return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}

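/*
 * Like mlx5_core_modify_cq_moderation(), but additionally selects
 * the moderation timer mode via the cq_period_mode field.
 */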
int mlx5_core_modify_cq_moderation_mode(struct mlx5_core_dev *dev,
					struct mlx5_core_cq *cq,
					u16 cq_period,
					u16 cq_max_count,
					u8 cq_mode)
{
	u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
	void *cqc;

	MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
	MLX5_SET(cqc, cqc, cq_period, cq_period);
	MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
	MLX5_SET(cqc, cqc, cq_period_mode, cq_mode);
	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.modify_field_select.modify_field_select,
		 MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT | MLX5_CQ_MODIFY_PERIOD_MODE);

	return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}

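/*
 * Initialize the per-device CQ table: the table lock, the per-entry
 * locks of the linear array, and the radix tree used for lookups.
 */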
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	int x;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	for (x = 0; x != MLX5_CQ_LINEAR_ARRAY_SIZE; x++)
		spin_lock_init(&table->linear_array[x].lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);

	return 0;
}

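/*
 * The CQ table embeds no dynamic allocations, so there is nothing
 * to release here.
 */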
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
{
}