/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <dev/mlx5/driver.h>
#include <rdma/ib_verbs.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>

#include <sys/epoch.h>

/*
 * The CQ table is protected by a combination of the network epoch and
 * a spinlock: readers run inside an epoch section and only take the
 * spinlock while a writer is registered.  A writer announces itself by
 * incrementing "writercount" and then waits for an epoch, so that all
 * readers are guaranteed to observe the updated count before the
 * writer proceeds under the spinlock.
 */
static void
mlx5_cq_table_write_lock(struct mlx5_cq_table *table)
{

	atomic_inc(&table->writercount);
	/* make sure all see the updated writercount */
	NET_EPOCH_WAIT();
	spin_lock(&table->writerlock);
}

static void
mlx5_cq_table_write_unlock(struct mlx5_cq_table *table)
{

	spin_unlock(&table->writerlock);
	atomic_dec(&table->writercount);
	/* drain all pending CQ callers */
	NET_EPOCH_WAIT();
}

void mlx5_cq_completion(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_core_cq *cq;
	struct epoch_tracker et;
	u32 cqn;
	bool do_lock;

	/* the CQN field of the EQE is 24 bits wide */
	cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

	NET_EPOCH_ENTER(et);

	do_lock = atomic_read(&table->writercount) != 0;
	if (unlikely(do_lock))
		spin_lock(&table->writerlock);

	if (likely(cqn < MLX5_CQ_LINEAR_ARRAY_SIZE))
		cq = table->linear_array[cqn].cq;
	else
		cq = radix_tree_lookup(&table->tree, cqn);

	if (unlikely(do_lock))
		spin_unlock(&table->writerlock);

	if (likely(cq != NULL)) {
		++cq->arm_sn;
		cq->comp(cq, eqe);
	} else {
		mlx5_core_warn(dev,
		    "Completion event for bogus CQ 0x%x\n", cqn);
	}

	NET_EPOCH_EXIT(et);
}

void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_core_cq *cq;
	struct epoch_tracker et;
	bool do_lock;

	NET_EPOCH_ENTER(et);

	do_lock = atomic_read(&table->writercount) != 0;
	if (unlikely(do_lock))
		spin_lock(&table->writerlock);

	if (likely(cqn < MLX5_CQ_LINEAR_ARRAY_SIZE))
		cq = table->linear_array[cqn].cq;
	else
		cq = radix_tree_lookup(&table->tree, cqn);

	if (unlikely(do_lock))
		spin_unlock(&table->writerlock);

	if (likely(cq != NULL)) {
		cq->event(cq, event_type);
	} else {
		mlx5_core_warn(dev,
		    "Asynchronous event for bogus CQ 0x%x\n", cqn);
	}

	NET_EPOCH_EXIT(et);
}

int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen, u32 *out, int outlen)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	u32 din[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
	u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
	int err;

	memset(out, 0, outlen);
	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
	if (err)
		return err;

	cq->cqn = MLX5_GET(create_cq_out, out, cqn);
	cq->cons_index = 0;
	cq->arm_sn = 0;

	mlx5_cq_table_write_lock(table);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	if (likely(err == 0 && cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE))
		table->linear_array[cq->cqn].cq = cq;
	mlx5_cq_table_write_unlock(table);

	if (err)
		goto err_cmd;

	cq->pid = curthread->td_proc->p_pid;
	cq->uar = dev->priv.uar;

	return 0;

err_cmd:
	/* undo the firmware CQ creation if the table insert failed */
	MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
	MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
	return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);

int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
	struct mlx5_core_cq *tmp;

	mlx5_cq_table_write_lock(table);
	if (likely(cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE))
		table->linear_array[cq->cqn].cq = NULL;
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	mlx5_cq_table_write_unlock(table);

	if (unlikely(tmp == NULL)) {
		mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
		return -EINVAL;
	} else if (unlikely(tmp != cq)) {
		mlx5_core_warn(dev, "corrupted cqn 0x%x\n", cq->cqn);
		return -EINVAL;
	}

	MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
	MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);

int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};

	MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
	MLX5_SET(query_cq_in, in, cqn, cq->cqn);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_cq);

int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};

	MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_cq);

int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
				   struct mlx5_core_cq *cq,
				   u16 cq_period,
				   u16 cq_max_count)
{
	u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
	void *cqc;

	MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
	MLX5_SET(cqc, cqc, cq_period, cq_period);
	MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
	/* select which CQ context fields the firmware should update */
	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.modify_field_select.modify_field_select,
		 MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);

	return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}

int mlx5_core_modify_cq_moderation_mode(struct mlx5_core_dev *dev,
					struct mlx5_core_cq *cq,
					u16 cq_period,
					u16 cq_max_count,
					u8 cq_mode)
{
	u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
	void *cqc;

	MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
	MLX5_SET(cqc, cqc, cq_period, cq_period);
	MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
	MLX5_SET(cqc, cqc, cq_period_mode, cq_mode);
	/* select which CQ context fields the firmware should update */
	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.modify_field_select.modify_field_select,
		 MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT | MLX5_CQ_MODIFY_PERIOD_MODE);

	return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}

int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->writerlock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);

	return 0;
}

void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
{
}