/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define LINUXKPI_PARAM_PREFIX mlx4_

#include <linux/etherdevice.h>
#include <dev/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>

#include <net/ipv6.h>

#include "fw.h"
#include "icm.h"

enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};

extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);

static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");

#define MLX4_GET(dest, source, offset)				      \
	do {							      \
		void *__p = (char *) (source) + (offset);	      \
		typedef struct { u64 value; } __packed u64_p_t;      \
		u64 val;					      \
		switch (sizeof (dest)) {			      \
		case 1: (dest) = *(u8 *) __p;	    break;	      \
		case 2: (dest) = be16_to_cpup(__p); break;	      \
		case 4: (dest) = be32_to_cpup(__p); break;	      \
		case 8: val = ((u64_p_t *)__p)->value;		      \
			(dest) = be64_to_cpu(val); break;	      \
		default: __buggy_use_of_MLX4_GET();		      \
		}						      \
	} while (0)

#define MLX4_PUT(dest, source, offset)				      \
	do {							      \
		void *__d = ((char *) (dest) + (offset));	      \
		switch (sizeof(source)) {			      \
		case 1: *(u8 *) __d = (source);		       break; \
		case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
		case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
		case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT();		      \
		}						      \
	} while (0)
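
/*
 * Illustrative note (added by the editor, not from the original sources):
 * command mailboxes are laid out big-endian by the firmware, so fields are
 * always accessed through MLX4_GET/MLX4_PUT, which byte-swap according to
 * sizeof(dest)/sizeof(source).  A typical read of a one-byte and a
 * four-byte field from an outbox looks like:
 *
 *	u8  field;
 *	u32 size;
 *	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
 *	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
 *
 * Passing a variable whose size is not 1, 2, 4 or 8 bytes produces a link
 * error via __buggy_use_of_MLX4_GET()/__buggy_use_of_MLX4_PUT().
 */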

static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
	static const char *fname[] = {
		[ 0] = "RC transport",
		[ 1] = "UC transport",
		[ 2] = "UD transport",
		[ 3] = "XRC transport",
		[ 6] = "SRQ support",
		[ 7] = "IPoIB checksum offload",
		[ 8] = "P_Key violation counter",
		[ 9] = "Q_Key violation counter",
		[12] = "Dual Port Different Protocol (DPDP) support",
		[15] = "Big LSO headers",
		[16] = "MW support",
		[17] = "APM support",
		[18] = "Atomic ops support",
		[19] = "Raw multicast support",
		[20] = "Address vector port checking support",
		[21] = "UD multicast support",
		[30] = "IBoE support",
		[32] = "Unicast loopback support",
		[34] = "FCS header control",
		[37] = "Wake On LAN (port1) support",
		[38] = "Wake On LAN (port2) support",
		[40] = "UDP RSS support",
		[41] = "Unicast VEP steering support",
		[42] = "Multicast VEP steering support",
		[48] = "Counters support",
		[52] = "RSS IP fragments support",
		[53] = "Port ETS Scheduler support",
		[55] = "Port link type sensing support",
		[59] = "Port management change event support",
		[61] = "64 byte EQE support",
		[62] = "64 byte CQE support",
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}

static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
{
	static const char * const fname[] = {
		[0] = "RSS support",
		[1] = "RSS Toeplitz Hash Function support",
		[2] = "RSS XOR Hash Function support",
		[3] = "Device managed flow steering support",
		[4] = "Automatic MAC reassignment support",
		[5] = "Time stamping support",
		[6] = "VST (control vlan insertion/stripping) support",
		[7] = "FSM (MAC anti-spoofing) support",
		[8] = "Dynamic QP updates support",
		[9] = "Device managed flow steering IPoIB support",
		[10] = "TCP/IP offloads/flow-steering for VXLAN support",
		[11] = "MAD DEMUX (Secure-Host) support",
		[12] = "Large cache line (>64B) CQE stride support",
		[13] = "Large cache line (>64B) EQE stride support",
		[14] = "Ethernet protocol control support",
		[15] = "Ethernet Backplane autoneg support",
		[16] = "CONFIG DEV support",
		[17] = "Asymmetric EQs support",
		[18] = "More than 80 VFs support",
		[19] = "Performance optimized for limited rule configuration flow steering support",
		[20] = "Recoverable error events support",
		[21] = "Port Remap support",
		[22] = "QCN support",
		[23] = "QP rate limiting support",
		[24] = "Ethernet Flow control statistics support",
		[25] = "Granular QoS per VF support",
		[26] = "Port ETS Scheduler support",
		[27] = "Port beacon support",
		[28] = "RX-ALL support",
		[29] = "802.1ad offload support",
		[31] = "Modifying loopback source checks using UPDATE_QP support",
		[32] = "Loopback source checks support",
		[33] = "RoCEv2 support",
		[34] = "DMFS Sniffer support (UC & MC)",
		[35] = "QinQ VST mode support",
		[36] = "sl to vl mapping table change event support"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}

int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err = 0;

#define MOD_STAT_CFG_IN_SIZE		0x100

#define MOD_STAT_CFG_PG_SZ_M_OFFSET	0x002
#define MOD_STAT_CFG_PG_SZ_OFFSET	0x003

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	MLX4_PUT(inbox, cfg->log_pg_sz,   MOD_STAT_CFG_PG_SZ_OFFSET);
	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
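
/*
 * Editorial note (added): most command wrappers in this file follow the same
 * shape as mlx4_MOD_STAT_CFG() above - allocate a DMA-able mailbox with
 * mlx4_alloc_cmd_mailbox(), fill or parse it with MLX4_PUT()/MLX4_GET(),
 * execute the firmware command with mlx4_cmd()/mlx4_cmd_box(), and release
 * the mailbox with mlx4_free_cmd_mailbox() on every exit path.
 */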

int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 in_modifier;
	u8 field;
	u16 field16;
	int err;

#define QUERY_FUNC_BUS_OFFSET			0x00
#define QUERY_FUNC_DEVICE_OFFSET		0x01
#define QUERY_FUNC_FUNCTION_OFFSET		0x01
#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET	0x03
#define QUERY_FUNC_RSVD_EQS_OFFSET		0x04
#define QUERY_FUNC_MAX_EQ_OFFSET		0x06
#define QUERY_FUNC_RSVD_UARS_OFFSET		0x0b

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	in_modifier = slave;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
			   MLX4_CMD_QUERY_FUNC,
			   MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET);
	func->bus = field & 0xf;
	MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET);
	func->device = field & 0xf1;
	MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET);
	func->function = field & 0x7;
	MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET);
	func->physical_function = field & 0xf;
	MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET);
	func->rsvd_eqs = field16 & 0xffff;
	MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET);
	func->max_eq = field16 & 0xffff;
	MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET);
	func->rsvd_uars = field & 0x0f;

	mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
		 func->bus, func->device, func->function, func->physical_function,
		 func->max_eq, func->rsvd_eqs, func->rsvd_uars);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_activate_vst_qinq(struct mlx4_priv *priv, int slave, int port)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_vport_state *vp_admin;
	int err;

	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	if (vp_admin->default_vlan != vp_oper->state.default_vlan) {
		err = __mlx4_register_vlan(&priv->dev, port,
					   vp_admin->default_vlan,
					   &vp_oper->vlan_idx);
		if (err) {
			vp_oper->vlan_idx = NO_INDX;
			mlx4_warn(&priv->dev,
				  "No vlan resources slave %d, port %d\n",
				  slave, port);
			return err;
		}
		mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
			 (int)(vp_oper->state.default_vlan),
			 vp_oper->vlan_idx, slave, port);
	}
	vp_oper->state.vlan_proto   = vp_admin->vlan_proto;
	vp_oper->state.default_vlan = vp_admin->default_vlan;
	vp_oper->state.default_qos  = vp_admin->default_qos;

	return 0;
}

static int mlx4_handle_vst_qinq(struct mlx4_priv *priv, int slave, int port)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_slave_state *slave_state;
	struct mlx4_vport_state *vp_admin;
	int err;

	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
	slave_state = &priv->mfunc.master.slave_state[slave];

	if ((vp_admin->vlan_proto != htons(ETH_P_8021AD)) ||
	    (!slave_state->active))
		return 0;

	if (vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
	    vp_oper->state.default_vlan == vp_admin->default_vlan &&
	    vp_oper->state.default_qos == vp_admin->default_qos)
		return 0;

	if (!slave_state->vst_qinq_supported) {
		/* Warn and revert the request to set vst QinQ mode */
		vp_admin->vlan_proto   = vp_oper->state.vlan_proto;
		vp_admin->default_vlan = vp_oper->state.default_vlan;
		vp_admin->default_qos  = vp_oper->state.default_qos;

		mlx4_warn(&priv->dev,
			  "Slave %d does not support VST QinQ mode\n", slave);
		return 0;
	}

	err = mlx4_activate_vst_qinq(priv, slave, port);
	return err;
}

int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u8 field, port;
	u32 size, proxy_qp, qkey;
	int err = 0;
	struct mlx4_func func;

#define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
#define QUERY_FUNC_CAP_FMR_OFFSET		0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP	0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP	0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP	0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP	0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP	0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30
#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET	0x48

#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x68

#define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET	0x6c

#define QUERY_FUNC_CAP_FMR_FLAG			0x80
#define QUERY_FUNC_CAP_FLAG_RDMA		0x40
#define QUERY_FUNC_CAP_FLAG_ETH			0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10
#define QUERY_FUNC_CAP_FLAG_RESD_LKEY		0x08
#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX	0x04

#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG	(1UL << 31)
#define QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG	(1UL << 30)

/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
#define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET	0x4
#define QUERY_FUNC_CAP_FLAGS0_OFFSET		0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET		0xc

#define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
#define QUERY_FUNC_CAP_QP0_PROXY		0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
#define QUERY_FUNC_CAP_QP1_PROXY		0x1c
#define QUERY_FUNC_CAP_PHYS_PORT_ID		0x28

#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC		0x40
#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN	0x80
#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO		0x10
#define QUERY_FUNC_CAP_VF_ENABLE_QP0		0x08

#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID	0x80
#define QUERY_FUNC_CAP_PHV_BIT			0x40
#define QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE	0x20

#define QUERY_FUNC_CAP_SUPPORTS_VST_QINQ	BIT(30)
#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS	BIT(31)

	if (vhcr->op_modifier == 1) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		int converted_port = mlx4_slave_convert_port(
				dev, slave, vhcr->in_modifier);
		struct mlx4_vport_oper_state *vp_oper;

		if (converted_port < 0)
			return -EINVAL;

		vhcr->in_modifier = converted_port;
		/* phys-port = logical-port */
		field = vhcr->in_modifier -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);

		port = vhcr->in_modifier;
		proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;

		/* Set nic_info bit to mark new fields support */
		field  = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;

		if (mlx4_vf_smi_enabled(dev, slave, port) &&
		    !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
			field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;
			MLX4_PUT(outbox->buf, qkey,
				 QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		}
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);

		/* size is now the QP number */
		size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);

		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);
		proxy_qp += 2;
		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);

		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
		err = mlx4_handle_vst_qinq(priv, slave, port);
		if (err)
			return err;

		field = 0;
		if (dev->caps.phv_bit[port])
			field |= QUERY_FUNC_CAP_PHV_BIT;
		if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
			field |= QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE;
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS0_OFFSET);

	} else if (vhcr->op_modifier == 0) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		struct mlx4_slave_state *slave_state =
			&priv->mfunc.master.slave_state[slave];

		/* enable rdma and ethernet interfaces, new quota locations,
		 * and reserved lkey
		 */
		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
			 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
			 QUERY_FUNC_CAP_FLAG_RESD_LKEY);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);

		field = min(
			bitmap_weight(actv_ports.ports, dev->caps.num_ports),
			dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);

		size = dev->caps.function_caps; /* set PF behaviours */
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);

		field = 0; /* protected FMR support not available as yet */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
		size = dev->caps.num_qps;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
		size = dev->caps.num_srqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
		size = dev->caps.num_cqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);

		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) ||
		    mlx4_QUERY_FUNC(dev, &func, slave)) {
			size = vhcr->in_modifier &
				QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				dev->caps.num_eqs :
				rounddown_pow_of_two(dev->caps.num_eqs);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = dev->caps.reserved_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		} else {
			size = vhcr->in_modifier &
				QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				func.max_eq :
				rounddown_pow_of_two(func.max_eq);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = func.rsvd_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		}

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
		size = dev->caps.num_mpts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
		size = dev->caps.num_mtts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);

		size = dev->caps.num_mgms + dev->caps.num_amgms;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);

		size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
			QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);

		size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);

		if (vhcr->in_modifier & QUERY_FUNC_CAP_SUPPORTS_VST_QINQ)
			slave_state->vst_qinq_supported = true;

	} else
		err = -EINVAL;

	return err;
}

int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
			struct mlx4_func_cap *func_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field, op_modifier;
	u32 size, qkey;
	int err = 0, quotas = 0;
	u32 in_modifier;
	u32 slave_caps;

	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
	slave_caps = QUERY_FUNC_CAP_SUPPORTS_VST_QINQ |
		QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;
	in_modifier = op_modifier ? gen_or_port : slave_caps;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier,
			   MLX4_CMD_QUERY_FUNC_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	if (!op_modifier) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
		if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
			mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
		func_cap->flags = field;
		quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);

		MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
		func_cap->num_ports = field;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
		func_cap->pf_context_behaviour = size;

		if (quotas) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
			func_cap->mcg_quota = size & 0xFFFFFF;

		} else {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
			func_cap->mcg_quota = size & 0xFFFFFF;
		}
		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
		func_cap->max_eq = size & 0xFFFFFF;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		func_cap->reserved_eq = size & 0xFFFFFF;

		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
			func_cap->reserved_lkey = size;
		} else {
			func_cap->reserved_lkey = 0;
		}

		func_cap->extra_flags = 0;

		/* Mailbox data from 0x6c and onward should only be treated if
		 * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags
		 */
		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG)
				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP;
			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG)
				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_A0_RES_QP;
		}

		goto out;
	}

	/* logical port query */
	if (gen_or_port > dev->caps.num_ports) {
		err = -EINVAL;
		goto out;
	}

	MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
			mlx4_err(dev, "VLAN is enforced on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}

		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
			mlx4_err(dev, "Force mac is enabled on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
		if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
			mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	}

	MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
	func_cap->physical_port = field;
	if (func_cap->physical_port != gen_or_port) {
		err = -ENOSYS;
		goto out;
	}

	if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {
		MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		func_cap->qp0_qkey = qkey;
	} else {
		func_cap->qp0_qkey = 0;
	}

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
	func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
	func_cap->qp0_proxy_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
	func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
	func_cap->qp1_proxy_qpn = size & 0xFFFFFF;

	if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
		MLX4_GET(func_cap->phys_port_id, outbox,
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	MLX4_GET(func_cap->flags0, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);

	/* All other resources are allocated by the master, but we still report
	 * 'num' and 'reserved' capabilities as follows:
	 * - num remains the maximum resource index
	 * - 'num - reserved' is the total available objects of a resource, but
	 *   resource indices may be less than 'reserved'
	 * TODO: set per-resource quotas */

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

static void disable_unsupported_roce_caps(void *buf);

int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32, flags, ext_flags;
	u16 size;
	u16 stat_rate;
	int err;
	int i;

#define QUERY_DEV_CAP_OUT_SIZE			0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET		0x26
#define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
#define QUERY_DEV_CAP_RSS_OFFSET		0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
#define QUERY_DEV_CAP_PORT_BEACON_OFFSET	0x34
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET	0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
#define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
#define QUERY_DEV_CAP_BF_OFFSET			0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
#define QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET	0x5D
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
#define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET	0x70
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET	0x70
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET	0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
#define QUERY_DEV_CAP_SL2VL_EVENT_OFFSET	0x78
#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE	0x7a
#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET	0x7b
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET		0x94
#define QUERY_DEV_CAP_PHV_EN_OFFSET		0x96
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET		0x9c
#define QUERY_DEV_CAP_DIAG_RPRT_PER_PORT	0x9c
#define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
#define QUERY_DEV_CAP_VXLAN			0x9e
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET		0xb0
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET	0xa8
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET	0xac
#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET	0xcc
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET	0xd0
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET	0xd2


	dev_cap->flags2 = 0;
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	if (mlx4_is_mfunc(dev))
		disable_unsupported_roce_caps(outbox);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
	dev_cap->reserved_qps = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
	dev_cap->max_qps = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
	dev_cap->reserved_srqs = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
	dev_cap->max_srqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
	dev_cap->max_cq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
	dev_cap->reserved_cqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
	dev_cap->max_cqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
	dev_cap->max_mpts = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
	dev_cap->reserved_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
	dev_cap->max_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
	dev_cap->reserved_mtts = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
	dev_cap->reserved_mrws = 1 << (field & 0xf);
	MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
	dev_cap->num_sys_eqs = size & 0xfff;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
	field &= 0x1f;
	if (!field)
		dev_cap->max_gso_sz = 0;
	else
		dev_cap->max_gso_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
	field &= 0xf;
	if (field) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
		dev_cap->max_rss_tbl_sz = 1 << field;
	} else
		dev_cap->max_rss_tbl_sz = 0;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
	dev_cap->max_rdma_global = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
	dev_cap->local_ca_ack_delay = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
	dev_cap->num_ports = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
	dev_cap->max_msg_sz = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET);
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
	dev_cap->fs_max_num_qp_per_entry = field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_SL2VL_EVENT_OFFSET);
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
	if (field & 0x1)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN;
	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
	dev_cap->stat_rate_support = stat_rate;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
	dev_cap->flags = flags | (u64)ext_flags << 32;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
	dev_cap->reserved_uars = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
	dev_cap->min_page_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
	if (field & 0x80) {
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
		dev_cap->bf_reg_size = 1 << (field & 0x1f);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
			field = 3;
		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
	} else {
		dev_cap->bf_reg_size = 0;
	}

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
	dev_cap->max_sq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
	dev_cap->max_sq_desc_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET);
	if (field & 0x1)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
	dev_cap->max_qp_per_mcg = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
	dev_cap->reserved_mgms = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
	dev_cap->max_mcgs = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
	dev_cap->reserved_pds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
	dev_cap->max_pds = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
	dev_cap->reserved_xrcds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
	dev_cap->max_xrcds = 1 << (field & 0x1f);

	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
	dev_cap->rdmarc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
	dev_cap->qpc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
	dev_cap->aux_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
	dev_cap->altc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
	dev_cap->eqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
	dev_cap->cqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
	dev_cap->srq_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
	dev_cap->cmpt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
	dev_cap->mtt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
	dev_cap->dmpt_entry_sz = size;
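
	/*
	 * Editorial note (added): the *_ENTRY_SZ values read above are the
	 * per-object context sizes, in bytes, that the firmware reports for
	 * QPC, CQC, EQC, SRQ, MPT and MTT entries; the driver later uses them
	 * when sizing the ICM tables it maps for the device.
	 */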

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
	dev_cap->max_srq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
	dev_cap->max_qp_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
	dev_cap->resize_srq = field & 1;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
	dev_cap->max_rq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
	dev_cap->max_rq_desc_sz = size;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
	if (field & (1 << 4))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QOS_VPP;
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
	if (field & (1 << 6))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
	if (field & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	MLX4_GET(dev_cap->bmme_flags, outbox,
		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	if (dev_cap->bmme_flags & MLX4_FLAG_ROCE_V1_V2)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ROCE_V1_V2;
	if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
	if (field & (1 << 2))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
	if (field & 0x40)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;

	MLX4_GET(dev_cap->reserved_lkey, outbox,
		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
	if (field32 & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
	MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT);
	if (field32 & (1 << 17))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
	if (field & (1 << 6))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
	if (field & (1 << 3))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
	MLX4_GET(dev_cap->max_icm_sz, outbox,
		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		MLX4_GET(dev_cap->max_counters, outbox,
			 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);

	MLX4_GET(field32, outbox,
		 QUERY_DEV_CAP_MAD_DEMUX_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX;

	MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox,
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK;
	MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox,
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK;

	MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
	dev_cap->rl_caps.num_rates = size;
	if (dev_cap->rl_caps.num_rates) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT;
		MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET);
		dev_cap->rl_caps.max_val = size & 0xfff;
		dev_cap->rl_caps.max_unit = size >> 14;
		MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET);
		dev_cap->rl_caps.min_val = size & 0xfff;
		dev_cap->rl_caps.min_unit = size >> 14;
	}

	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	if (field32 & (1 << 16))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
	if (field32 & (1 << 18))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB;
	if (field32 & (1 << 19))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_LB_SRC_CHK;
	if (field32 & (1 << 26))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
	if (field32 & (1 << 20))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
	if (field32 & (1 << 21))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;

	for (i = 1; i <= dev_cap->num_ports; i++) {
		err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
		if (err)
			goto out;
	}

	/*
	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
	 * we can't use any EQs whose doorbell falls on that page,
	 * even if the EQ itself isn't reserved.
	 */
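	/*
	 * Illustrative example (added note): with reserved_uars = 8, the EQ
	 * doorbells of EQs 0-31 fall on reserved UAR pages, so the check
	 * below raises reserved_eqs to at least 8 * 4 = 32.
	 */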
	if (dev_cap->num_sys_eqs == 0)
		dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
					    dev_cap->reserved_eqs);
	else
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	if (dev_cap->bf_reg_size > 0)
		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
	else
		mlx4_dbg(dev, "BlueFlame not available\n");

	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
		 dev_cap->bmme_flags, dev_cap->reserved_lkey);
	mlx4_dbg(dev, "Max ICM size %lld MB\n",
		 (unsigned long long) dev_cap->max_icm_sz >> 20);
	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
		 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
	mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
	mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
		 dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs,
		 dev_cap->eqc_entry_sz);
	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
	mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_mgms);
	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
		 dev_cap->local_ca_ack_delay, 128 << dev_cap->port_cap[1].ib_mtu,
		 dev_cap->port_cap[1].max_port_width);
	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
	mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
	mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
	mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n",
		 dev_cap->dmfs_high_rate_qpn_base);
	mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
		 dev_cap->dmfs_high_rate_qpn_range);

	if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT) {
		struct mlx4_rate_limit_caps *rl_caps = &dev_cap->rl_caps;

		mlx4_dbg(dev, "QP Rate-Limit: #rates %d, unit/val max %d/%d, min %d/%d\n",
			 rl_caps->num_rates, rl_caps->max_unit, rl_caps->max_val,
			 rl_caps->min_unit, rl_caps->min_val);
	}

	dump_dev_cap_flags(dev, dev_cap->flags);
	dump_dev_cap_flags2(dev, dev_cap->flags2);
}

int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);

		if (err)
			goto out;

		MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
		port_cap->max_vl = field >> 4;
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
		port_cap->ib_mtu = field >> 4;
		port_cap->max_port_width = field & 0xf;
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
		port_cap->max_gids = 1 << (field & 0xf);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
		port_cap->max_pkeys = 1 << (field & 0xf);
	} else {
#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
#define QUERY_PORT_MTU_OFFSET			0x01
#define QUERY_PORT_ETH_MTU_OFFSET		0x02
#define QUERY_PORT_WIDTH_OFFSET		0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
#define QUERY_PORT_MAX_VL_OFFSET		0x0b
#define QUERY_PORT_MAC_OFFSET			0x10
#define QUERY_PORT_TRANS_VENDOR_OFFSET		0x18
#define QUERY_PORT_WAVELENGTH_OFFSET		0x1c
#define QUERY_PORT_TRANS_CODE_OFFSET		0x20

		err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT,
				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
		if (err)
			goto out;

		MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
		port_cap->link_state = (field & 0x80) >> 7;
		port_cap->supported_port_types = field & 3;
		port_cap->suggested_type = (field >> 3) & 1;
		port_cap->default_sense = (field >> 4) & 1;
		port_cap->dmfs_optimized_state = (field >> 5) & 1;
		MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
		port_cap->ib_mtu = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
		port_cap->max_port_width = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
		port_cap->max_gids = 1 << (field >> 4);
		port_cap->max_pkeys = 1 << (field & 0xf);
		MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
		port_cap->max_vl = field & 0xf;
		port_cap->max_tc_eth = field >> 4;
		MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
		port_cap->log_max_macs = field & 0xf;
		port_cap->log_max_vlans = field >> 4;
		MLX4_GET(port_cap->eth_mtu, outbox, QUERY_PORT_ETH_MTU_OFFSET);
		MLX4_GET(port_cap->def_mac, outbox, QUERY_PORT_MAC_OFFSET);
		MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
		port_cap->trans_type = field32 >> 24;
		port_cap->vendor_oui = field32 & 0xffffff;
		MLX4_GET(port_cap->wavelength, outbox, QUERY_PORT_WAVELENGTH_OFFSET);
		MLX4_GET(port_cap->trans_code, outbox, QUERY_PORT_TRANS_CODE_OFFSET);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

#define DEV_CAP_EXT_2_FLAG_PFC_COUNTERS	(1 << 28)
#define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL	(1 << 26)
#define DEV_CAP_EXT_2_FLAG_80_VFS	(1 << 21)
#define DEV_CAP_EXT_2_FLAG_FSM		(1 << 20)

int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	u64 flags;
	int err = 0;
	u8 field;
	u16 field16;
	u32 bmme_flags, field32;
	int real_port;
	int slave_port;
	int first_port;
	struct mlx4_active_ports actv_ports;

	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	disable_unsupported_roce_caps(outbox->buf);
	/* add port mng change event capability and disable mw type 1
	 * unconditionally to slaves
	 */
	MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
	flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
	actv_ports = mlx4_get_active_ports(dev, slave);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	for (slave_port = 0, real_port = first_port;
	     real_port < first_port +
	     bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	     ++real_port, ++slave_port) {
		if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
			flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
		else
			flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
	}
	for (; slave_port < dev->caps.num_ports; ++slave_port)
		flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);

	/* Not exposing RSS IP fragments to guests */
	flags &= ~MLX4_DEV_CAP_FLAG_RSS_IP_FRAG;
	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);

	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
	field &= ~0x0F;
	field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);

	/* For guests, disable timestamp */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);

	/* For guests, disable vxlan tunneling and QoS support */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
	field &= 0xd7;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);

	/* For guests, disable port BEACON */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_PORT_BEACON_OFFSET);

	/* For guests, report Blueflame disabled */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);

	/* For guests, disable mw type 2 and port remap */
	MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
	bmme_flags &= ~MLX4_FLAG_PORT_REMAP;
	MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);

	/* turn off device-managed steering capability if not enabled */
	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(field, outbox->buf,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
		field &= 0x7f;
		MLX4_PUT(outbox->buf, field,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	}

	/* turn off ipoib managed steering for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	field &= ~0x80;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);

	/* turn off host side virt features (VST, FSM, etc) for guests */
	MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS |
		     DEV_CAP_EXT_2_FLAG_FSM | DEV_CAP_EXT_2_FLAG_PFC_COUNTERS);
	MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);

	/* turn off QCN for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
	field &= 0xfe;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);

	/* turn off QP max-rate limiting for guests */
	field16 = 0;
	MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);

	/* turn off QoS per VF support for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
	field &= 0xef;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);

	/* turn off ignore FCS feature for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
	field &= 0xfb;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);

	return 0;
}

static void disable_unsupported_roce_caps(void *buf)
{
	u32 flags;

	MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags &= ~(1UL << 31);
	MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	flags &= ~(1UL << 24);
	MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	MLX4_GET(flags, buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	flags &= ~(MLX4_FLAG_ROCE_V1_V2);
	MLX4_PUT(buf, flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
}

int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 def_mac;
	u8 port_type;
	u16 short_field;
	int err;
	int port = mlx4_slave_convert_port(dev, slave,
					   vhcr->in_modifier & 0xFF);

#define MLX4_VF_PORT_NO_LINK_SENSE_MASK	0xE0
#define MLX4_PORT_LINK_UP_MASK		0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET	0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET	0x0e

	if (port < 0)
		return -EINVAL;

	/* Protect against untrusted guests: enforce that this is the
	 * QUERY_PORT general query.
	 */
	if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF)
		return -EINVAL;

	vhcr->in_modifier = port;

	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);

	if (!err && dev->caps.function != slave) {
		def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);

		/* get port type - currently only eth is enabled */
		MLX4_GET(port_type, outbox->buf,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		/* No link sensing allowed */
		port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
		/* set port type to currently operating port type */
		port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);

		if (0 /* IFLA_VF_LINK_STATE_ENABLE == admin_link_state */)
			port_type |= MLX4_PORT_LINK_UP_MASK;
		else if (1 /* IFLA_VF_LINK_STATE_DISABLE == admin_link_state */)
			port_type &= ~MLX4_PORT_LINK_UP_MASK;
		else if (0 /* IFLA_VF_LINK_STATE_AUTO == admin_link_state && mlx4_is_bonded(dev) */) {
			int other_port = (port == 1) ? 2 : 1;
			struct mlx4_port_cap port_cap;

			err = mlx4_QUERY_PORT(dev, other_port, &port_cap);
			if (err)
				goto out;
			port_type |= (port_cap.link_state << 7);
		}

		MLX4_PUT(outbox->buf, port_type,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
			short_field = mlx4_get_slave_num_gids(dev, slave, port);
		else
			short_field = 1; /* slave max gids */
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_GID_OFFSET);

		short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	}
out:
	return err;
}

int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
				    int *gid_tbl_len, int *pkey_tbl_len)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u16 field;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
	*gid_tbl_len = field;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	*pkey_tbl_len = field;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);

int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;
	int i;
	int err = 0;
	int ts = 0, tc = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	pages = mailbox->buf;

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
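		/*
		 * Illustrative example (added note): for a 1 MB chunk at DMA
		 * address 0x2300000, ffs(addr | size) - 1 yields lg = 20, so
		 * the chunk is passed as a single 1 MB "page"; the low bits
		 * of each page entry below then carry lg - MLX4_ICM_PAGE_SHIFT,
		 * i.e. the log2 of the page size in ICM (4 KB) pages.
		 */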
1521 */ 1522 lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1; 1523 if (lg < MLX4_ICM_PAGE_SHIFT) { 1524 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n", 1525 MLX4_ICM_PAGE_SIZE, 1526 (unsigned long long) mlx4_icm_addr(&iter), 1527 mlx4_icm_size(&iter)); 1528 err = -EINVAL; 1529 goto out; 1530 } 1531 1532 for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) { 1533 if (virt != -1) { 1534 pages[nent * 2] = cpu_to_be64(virt); 1535 virt += 1 << lg; 1536 } 1537 1538 pages[nent * 2 + 1] = 1539 cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) | 1540 (lg - MLX4_ICM_PAGE_SHIFT)); 1541 ts += 1 << (lg - 10); 1542 ++tc; 1543 1544 if (++nent == MLX4_MAILBOX_SIZE / 16) { 1545 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, 1546 MLX4_CMD_TIME_CLASS_B, 1547 MLX4_CMD_NATIVE); 1548 if (err) 1549 goto out; 1550 nent = 0; 1551 } 1552 } 1553 } 1554 1555 if (nent) 1556 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, 1557 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 1558 if (err) 1559 goto out; 1560 1561 switch (op) { 1562 case MLX4_CMD_MAP_FA: 1563 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts); 1564 break; 1565 case MLX4_CMD_MAP_ICM_AUX: 1566 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts); 1567 break; 1568 case MLX4_CMD_MAP_ICM: 1569 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n", 1570 tc, ts, (unsigned long long) virt - (ts << 10)); 1571 break; 1572 } 1573 1574 out: 1575 mlx4_free_cmd_mailbox(dev, mailbox); 1576 return err; 1577 } 1578 1579 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm) 1580 { 1581 return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1); 1582 } 1583 1584 int mlx4_UNMAP_FA(struct mlx4_dev *dev) 1585 { 1586 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, 1587 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 1588 } 1589 1590 1591 int mlx4_RUN_FW(struct mlx4_dev *dev) 1592 { 1593 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, 1594 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1595 } 1596 1597 int mlx4_QUERY_FW(struct mlx4_dev *dev) 1598 { 1599 struct mlx4_fw *fw = &mlx4_priv(dev)->fw; 1600 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; 1601 struct mlx4_cmd_mailbox *mailbox; 1602 u32 *outbox; 1603 int err = 0; 1604 u64 fw_ver; 1605 u16 cmd_if_rev; 1606 u8 lg; 1607 1608 #define QUERY_FW_OUT_SIZE 0x100 1609 #define QUERY_FW_VER_OFFSET 0x00 1610 #define QUERY_FW_PPF_ID 0x09 1611 #define QUERY_FW_CMD_IF_REV_OFFSET 0x0a 1612 #define QUERY_FW_MAX_CMD_OFFSET 0x0f 1613 #define QUERY_FW_ERR_START_OFFSET 0x30 1614 #define QUERY_FW_ERR_SIZE_OFFSET 0x38 1615 #define QUERY_FW_ERR_BAR_OFFSET 0x3c 1616 1617 #define QUERY_FW_SIZE_OFFSET 0x00 1618 #define QUERY_FW_CLR_INT_BASE_OFFSET 0x20 1619 #define QUERY_FW_CLR_INT_BAR_OFFSET 0x28 1620 1621 #define QUERY_FW_COMM_BASE_OFFSET 0x40 1622 #define QUERY_FW_COMM_BAR_OFFSET 0x48 1623 1624 #define QUERY_FW_CLOCK_OFFSET 0x50 1625 #define QUERY_FW_CLOCK_BAR 0x58 1626 1627 mailbox = mlx4_alloc_cmd_mailbox(dev); 1628 if (IS_ERR(mailbox)) 1629 return PTR_ERR(mailbox); 1630 outbox = mailbox->buf; 1631 1632 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW, 1633 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1634 if (err) 1635 goto out; 1636 1637 MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET); 1638 /* 1639 * FW subminor version is at more significant bits than minor 1640 * version, so swap here. 
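 * For example, a raw value of 0x000201f4000e (major 2, subminor 0x1f4, minor 0xe) becomes 0x0002000e01f4 and is reported below as 2.14.500.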
1641 */ 1642 dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) | 1643 ((fw_ver & 0xffff0000ull) >> 16) | 1644 ((fw_ver & 0x0000ffffull) << 16); 1645 1646 MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); 1647 dev->caps.function = lg; 1648 1649 if (mlx4_is_slave(dev)) 1650 goto out; 1651 1652 1653 MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); 1654 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || 1655 cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { 1656 mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n", 1657 cmd_if_rev); 1658 mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n", 1659 (int) (dev->caps.fw_ver >> 32), 1660 (int) (dev->caps.fw_ver >> 16) & 0xffff, 1661 (int) dev->caps.fw_ver & 0xffff); 1662 mlx4_err(dev, "This driver version supports only revisions %d to %d\n", 1663 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV); 1664 err = -ENODEV; 1665 goto out; 1666 } 1667 1668 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS) 1669 dev->flags |= MLX4_FLAG_OLD_PORT_CMDS; 1670 1671 MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); 1672 cmd->max_cmds = 1 << lg; 1673 1674 mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n", 1675 (int) (dev->caps.fw_ver >> 32), 1676 (int) (dev->caps.fw_ver >> 16) & 0xffff, 1677 (int) dev->caps.fw_ver & 0xffff, 1678 cmd_if_rev, cmd->max_cmds); 1679 1680 MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET); 1681 MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET); 1682 MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET); 1683 fw->catas_bar = (fw->catas_bar >> 6) * 2; 1684 1685 mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n", 1686 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar); 1687 1688 MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET); 1689 MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET); 1690 MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET); 1691 fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2; 1692 1693 MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET); 1694 MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET); 1695 fw->comm_bar = (fw->comm_bar >> 6) * 2; 1696 mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n", 1697 fw->comm_bar, (unsigned long long)fw->comm_base); 1698 mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2); 1699 1700 MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET); 1701 MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR); 1702 fw->clock_bar = (fw->clock_bar >> 6) * 2; 1703 mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n", 1704 fw->clock_bar, (unsigned long long)fw->clock_offset); 1705 1706 /* 1707 * Round up number of system pages needed in case 1708 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. 
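 * For instance (illustrative arithmetic only, assuming 4 KB ICM pages and 16 KB system pages): a count of 10 ICM pages is aligned up to 12 and shifted right by 2, giving 3 system pages.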
1709 */ 1710 fw->fw_pages = 1711 ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> 1712 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); 1713 1714 mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n", 1715 (unsigned long long) fw->clr_int_base, fw->clr_int_bar); 1716 1717 out: 1718 mlx4_free_cmd_mailbox(dev, mailbox); 1719 return err; 1720 } 1721 1722 int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, 1723 struct mlx4_vhcr *vhcr, 1724 struct mlx4_cmd_mailbox *inbox, 1725 struct mlx4_cmd_mailbox *outbox, 1726 struct mlx4_cmd_info *cmd) 1727 { 1728 u8 *outbuf; 1729 int err; 1730 1731 outbuf = outbox->buf; 1732 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW, 1733 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1734 if (err) 1735 return err; 1736 1737 /* for slaves, set pci PPF ID to invalid and zero out everything 1738 * else except FW version */ 1739 outbuf[0] = outbuf[1] = 0; 1740 memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8); 1741 outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID; 1742 1743 return 0; 1744 } 1745 1746 static void get_board_id(void *vsd, char *board_id) 1747 { 1748 int i; 1749 1750 #define VSD_OFFSET_SIG1 0x00 1751 #define VSD_OFFSET_SIG2 0xde 1752 #define VSD_OFFSET_MLX_BOARD_ID 0xd0 1753 #define VSD_OFFSET_TS_BOARD_ID 0x20 1754 1755 #define VSD_SIGNATURE_TOPSPIN 0x5ad 1756 1757 memset(board_id, 0, MLX4_BOARD_ID_LEN); 1758 1759 if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN && 1760 be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) { 1761 strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN); 1762 } else { 1763 /* 1764 * The board ID is a string but the firmware byte 1765 * swaps each 4-byte word before passing it back to 1766 * us. Therefore we need to swab it before printing. 1767 */ 1768 u32 *bid_u32 = (u32 *)board_id; 1769 1770 for (i = 0; i < 4; ++i) { 1771 typedef struct { u32 value; } __packed u64_p_t; 1772 1773 u32 *addr; 1774 u32 val; 1775 1776 addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4); 1777 val = ((u64_p_t *)addr)->value; 1778 val = swab32(val); 1779 ((u64_p_t *)&bid_u32[i])->value = val; 1780 } 1781 } 1782 } 1783 1784 int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter) 1785 { 1786 struct mlx4_cmd_mailbox *mailbox; 1787 u32 *outbox; 1788 int err; 1789 1790 #define QUERY_ADAPTER_OUT_SIZE 0x100 1791 #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 1792 #define QUERY_ADAPTER_VSD_OFFSET 0x20 1793 1794 mailbox = mlx4_alloc_cmd_mailbox(dev); 1795 if (IS_ERR(mailbox)) 1796 return PTR_ERR(mailbox); 1797 outbox = mailbox->buf; 1798 1799 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER, 1800 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1801 if (err) 1802 goto out; 1803 1804 MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); 1805 1806 get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, 1807 adapter->board_id); 1808 1809 out: 1810 mlx4_free_cmd_mailbox(dev, mailbox); 1811 return err; 1812 } 1813 1814 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) 1815 { 1816 struct mlx4_cmd_mailbox *mailbox; 1817 __be32 *inbox; 1818 int err; 1819 static const u8 a0_dmfs_hw_steering[] = { 1820 [MLX4_STEERING_DMFS_A0_DEFAULT] = 0, 1821 [MLX4_STEERING_DMFS_A0_DYNAMIC] = 1, 1822 [MLX4_STEERING_DMFS_A0_STATIC] = 2, 1823 [MLX4_STEERING_DMFS_A0_DISABLE] = 3 1824 }; 1825 1826 #define INIT_HCA_IN_SIZE 0x200 1827 #define INIT_HCA_VERSION_OFFSET 0x000 1828 #define INIT_HCA_VERSION 2 1829 #define INIT_HCA_VXLAN_OFFSET 0x0c 1830 #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e 1831 
#define INIT_HCA_FLAGS_OFFSET 0x014 1832 #define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018 1833 #define INIT_HCA_QPC_OFFSET 0x020 1834 #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) 1835 #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) 1836 #define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28) 1837 #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f) 1838 #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) 1839 #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) 1840 #define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38) 1841 #define INIT_HCA_EQE_CQE_STRIDE_OFFSET (INIT_HCA_QPC_OFFSET + 0x3b) 1842 #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) 1843 #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) 1844 #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) 1845 #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) 1846 #define INIT_HCA_NUM_SYS_EQS_OFFSET (INIT_HCA_QPC_OFFSET + 0x6a) 1847 #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) 1848 #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77) 1849 #define INIT_HCA_MCAST_OFFSET 0x0c0 1850 #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) 1851 #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) 1852 #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) 1853 #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) 1854 #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) 1855 #define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6 1856 #define INIT_HCA_FS_PARAM_OFFSET 0x1d0 1857 #define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00) 1858 #define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12) 1859 #define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18) 1860 #define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b) 1861 #define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21) 1862 #define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22) 1863 #define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25) 1864 #define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26) 1865 #define INIT_HCA_TPT_OFFSET 0x0f0 1866 #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) 1867 #define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08) 1868 #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) 1869 #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10) 1870 #define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18) 1871 #define INIT_HCA_UAR_OFFSET 0x120 1872 #define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a) 1873 #define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b) 1874 1875 mailbox = mlx4_alloc_cmd_mailbox(dev); 1876 if (IS_ERR(mailbox)) 1877 return PTR_ERR(mailbox); 1878 inbox = mailbox->buf; 1879 1880 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; 1881 1882 *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) = 1883 (ilog2(cache_line_size()) - 4) << 5; 1884 1885 #if defined(__LITTLE_ENDIAN) 1886 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); 1887 #elif defined(__BIG_ENDIAN) 1888 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1); 1889 #else 1890 #error Host endianness not defined 1891 #endif 1892 /* Check port for UD address vector: */ 1893 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1); 1894 1895 /* Enable IPoIB checksumming if we 
can: */ 1896 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) 1897 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3); 1898 1899 /* Enable QoS support if module parameter set */ 1900 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos) 1901 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2); 1902 1903 /* enable counters */ 1904 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS) 1905 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4); 1906 1907 /* Enable RSS spread to fragmented IP packets when supported */ 1908 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG) 1909 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13); 1910 1911 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ 1912 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) { 1913 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29); 1914 dev->caps.eqe_size = 64; 1915 dev->caps.eqe_factor = 1; 1916 } else { 1917 dev->caps.eqe_size = 32; 1918 dev->caps.eqe_factor = 0; 1919 } 1920 1921 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) { 1922 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30); 1923 dev->caps.cqe_size = 64; 1924 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 1925 } else { 1926 dev->caps.cqe_size = 32; 1927 } 1928 1929 #if 0 1930 /* XXX not currently supported by FreeBSD's mlxen */ 1931 /* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */ 1932 if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) && 1933 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) { 1934 dev->caps.eqe_size = cache_line_size(); 1935 dev->caps.cqe_size = cache_line_size(); 1936 dev->caps.eqe_factor = 0; 1937 MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 | 1938 (ilog2(dev->caps.eqe_size) - 5)), 1939 INIT_HCA_EQE_CQE_STRIDE_OFFSET); 1940 1941 /* Userspace still needs to know about CQE > 32B support */ 1942 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 1943 } 1944 #endif 1945 1946 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) 1947 *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1U << 31); 1948 1949 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 1950 1951 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); 1952 MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET); 1953 MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET); 1954 MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET); 1955 MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET); 1956 MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET); 1957 MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET); 1958 MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET); 1959 MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET); 1960 MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); 1961 MLX4_PUT(inbox, param->num_sys_eqs, INIT_HCA_NUM_SYS_EQS_OFFSET); 1962 MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET); 1963 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET); 1964 1965 /* steering attributes */ 1966 if (dev->caps.steering_mode == 1967 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1968 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= 1969 cpu_to_be32(1 << 1970 INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN); 1971 1972 MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET); 1973 MLX4_PUT(inbox, param->log_mc_entry_sz, 1974 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); 1975 MLX4_PUT(inbox, param->log_mc_table_sz, 1976 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); 1977 /* Enable Ethernet
flow steering 1978 * with udp unicast and tcp unicast 1979 */ 1980 if (dev->caps.dmfs_high_steer_mode != 1981 MLX4_STEERING_DMFS_A0_STATIC) 1982 MLX4_PUT(inbox, 1983 (u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), 1984 INIT_HCA_FS_ETH_BITS_OFFSET); 1985 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, 1986 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET); 1987 /* Enable IPoIB flow steering 1988 * with udp unicast and tcp unicast 1989 */ 1990 MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), 1991 INIT_HCA_FS_IB_BITS_OFFSET); 1992 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, 1993 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET); 1994 1995 if (dev->caps.dmfs_high_steer_mode != 1996 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 1997 MLX4_PUT(inbox, 1998 ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode] 1999 << 6)), 2000 INIT_HCA_FS_A0_OFFSET); 2001 } else { 2002 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); 2003 MLX4_PUT(inbox, param->log_mc_entry_sz, 2004 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 2005 MLX4_PUT(inbox, param->log_mc_hash_sz, 2006 INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 2007 MLX4_PUT(inbox, param->log_mc_table_sz, 2008 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 2009 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0) 2010 MLX4_PUT(inbox, (u8) (1 << 3), 2011 INIT_HCA_UC_STEERING_OFFSET); 2012 } 2013 2014 /* TPT attributes */ 2015 2016 MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET); 2017 MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET); 2018 MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET); 2019 MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET); 2020 MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET); 2021 2022 /* UAR attributes */ 2023 2024 MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET); 2025 MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); 2026 2027 /* set parser VXLAN attributes */ 2028 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) { 2029 u8 parser_params = 0; 2030 MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET); 2031 } 2032 2033 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 2034 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 2035 2036 if (err) 2037 mlx4_err(dev, "INIT_HCA returns %d\n", err); 2038 2039 mlx4_free_cmd_mailbox(dev, mailbox); 2040 return err; 2041 } 2042 2043 int mlx4_QUERY_HCA(struct mlx4_dev *dev, 2044 struct mlx4_init_hca_param *param) 2045 { 2046 struct mlx4_cmd_mailbox *mailbox; 2047 __be32 *outbox; 2048 u32 dword_field; 2049 int err; 2050 u8 byte_field; 2051 static const u8 a0_dmfs_query_hw_steering[] = { 2052 [0] = MLX4_STEERING_DMFS_A0_DEFAULT, 2053 [1] = MLX4_STEERING_DMFS_A0_DYNAMIC, 2054 [2] = MLX4_STEERING_DMFS_A0_STATIC, 2055 [3] = MLX4_STEERING_DMFS_A0_DISABLE 2056 }; 2057 2058 #define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04 2059 #define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c 2060 2061 mailbox = mlx4_alloc_cmd_mailbox(dev); 2062 if (IS_ERR(mailbox)) 2063 return PTR_ERR(mailbox); 2064 outbox = mailbox->buf; 2065 2066 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, 2067 MLX4_CMD_QUERY_HCA, 2068 MLX4_CMD_TIME_CLASS_B, 2069 !mlx4_is_slave(dev)); 2070 if (err) 2071 goto out; 2072 2073 MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET); 2074 MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET); 2075 2076 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 2077 2078 MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET); 2079 MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET); 2080 MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET); 2081 
MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET); 2082 MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET); 2083 MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET); 2084 MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET); 2085 MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); 2086 MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); 2087 MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); 2088 MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); 2089 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); 2090 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); 2091 2092 MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET); 2093 if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) { 2094 param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 2095 } else { 2096 MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET); 2097 if (byte_field & 0x8) 2098 param->steering_mode = MLX4_STEERING_MODE_B0; 2099 else 2100 param->steering_mode = MLX4_STEERING_MODE_A0; 2101 } 2102 2103 if (dword_field & (1 << 13)) 2104 param->rss_ip_frags = 1; 2105 2106 /* steering attributes */ 2107 if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 2108 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); 2109 MLX4_GET(param->log_mc_entry_sz, outbox, 2110 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); 2111 MLX4_GET(param->log_mc_table_sz, outbox, 2112 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); 2113 MLX4_GET(byte_field, outbox, 2114 INIT_HCA_FS_A0_OFFSET); 2115 param->dmfs_high_steer_mode = 2116 a0_dmfs_query_hw_steering[(byte_field >> 6) & 3]; 2117 } else { 2118 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); 2119 MLX4_GET(param->log_mc_entry_sz, outbox, 2120 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 2121 MLX4_GET(param->log_mc_hash_sz, outbox, 2122 INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 2123 MLX4_GET(param->log_mc_table_sz, outbox, 2124 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 2125 } 2126 2127 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ 2128 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS); 2129 if (byte_field & 0x20) /* 64-bytes eqe enabled */ 2130 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED; 2131 if (byte_field & 0x40) /* 64-bytes cqe enabled */ 2132 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED; 2133 2134 /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ 2135 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET); 2136 if (byte_field) { 2137 param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED; 2138 param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED; 2139 param->cqe_size = 1 << ((byte_field & 2140 MLX4_CQE_SIZE_MASK_STRIDE) + 5); 2141 param->eqe_size = 1 << (((byte_field & 2142 MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5); 2143 } 2144 2145 /* TPT attributes */ 2146 2147 MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); 2148 MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET); 2149 MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); 2150 MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); 2151 MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); 2152 2153 /* UAR attributes */ 2154 2155 MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); 2156 MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); 2157 2158 /* phv_check enable */ 2159 MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET); 2160 if (byte_field & 0x2) 2161 param->phv_check_en = 1; 
2162 out: 2163 mlx4_free_cmd_mailbox(dev, mailbox); 2164 2165 return err; 2166 } 2167 2168 static int mlx4_hca_core_clock_update(struct mlx4_dev *dev) 2169 { 2170 struct mlx4_cmd_mailbox *mailbox; 2171 __be32 *outbox; 2172 int err; 2173 2174 mailbox = mlx4_alloc_cmd_mailbox(dev); 2175 if (IS_ERR(mailbox)) { 2176 mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n"); 2177 return PTR_ERR(mailbox); 2178 } 2179 outbox = mailbox->buf; 2180 2181 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, 2182 MLX4_CMD_QUERY_HCA, 2183 MLX4_CMD_TIME_CLASS_B, 2184 !mlx4_is_slave(dev)); 2185 if (err) { 2186 mlx4_warn(dev, "hca_core_clock update failed\n"); 2187 goto out; 2188 } 2189 2190 MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET); 2191 2192 out: 2193 mlx4_free_cmd_mailbox(dev, mailbox); 2194 2195 return err; 2196 } 2197 2198 /* for IB-type ports only in SRIOV mode. Checks that both proxy QP0 2199 * and real QP0 are active, so that the paravirtualized QP0 is ready 2200 * to operate */ 2201 static int check_qp0_state(struct mlx4_dev *dev, int function, int port) 2202 { 2203 struct mlx4_priv *priv = mlx4_priv(dev); 2204 /* irrelevant if not infiniband */ 2205 if (priv->mfunc.master.qp0_state[port].proxy_qp0_active && 2206 priv->mfunc.master.qp0_state[port].qp0_active) 2207 return 1; 2208 return 0; 2209 } 2210 2211 int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, 2212 struct mlx4_vhcr *vhcr, 2213 struct mlx4_cmd_mailbox *inbox, 2214 struct mlx4_cmd_mailbox *outbox, 2215 struct mlx4_cmd_info *cmd) 2216 { 2217 struct mlx4_priv *priv = mlx4_priv(dev); 2218 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); 2219 int err; 2220 2221 if (port < 0) 2222 return -EINVAL; 2223 2224 if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port)) 2225 return 0; 2226 2227 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { 2228 /* Enable port only if it was previously disabled */ 2229 if (!priv->mfunc.master.init_port_ref[port]) { 2230 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2231 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2232 if (err) 2233 return err; 2234 } 2235 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); 2236 } else { 2237 if (slave == mlx4_master_func_num(dev)) { 2238 if (check_qp0_state(dev, slave, port) && 2239 !priv->mfunc.master.qp0_state[port].port_active) { 2240 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2241 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2242 if (err) 2243 return err; 2244 priv->mfunc.master.qp0_state[port].port_active = 1; 2245 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); 2246 } 2247 } else 2248 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); 2249 } 2250 ++priv->mfunc.master.init_port_ref[port]; 2251 return 0; 2252 } 2253 2254 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) 2255 { 2256 struct mlx4_cmd_mailbox *mailbox; 2257 u32 *inbox; 2258 int err; 2259 u32 flags; 2260 u16 field; 2261 2262 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 2263 #define INIT_PORT_IN_SIZE 256 2264 #define INIT_PORT_FLAGS_OFFSET 0x00 2265 #define INIT_PORT_FLAG_SIG (1 << 18) 2266 #define INIT_PORT_FLAG_NG (1 << 17) 2267 #define INIT_PORT_FLAG_G0 (1 << 16) 2268 #define INIT_PORT_VL_SHIFT 4 2269 #define INIT_PORT_PORT_WIDTH_SHIFT 8 2270 #define INIT_PORT_MTU_OFFSET 0x04 2271 #define INIT_PORT_MAX_GID_OFFSET 0x06 2272 #define INIT_PORT_MAX_PKEY_OFFSET 0x0a 2273 #define INIT_PORT_GUID0_OFFSET 0x10 2274 #define INIT_PORT_NODE_GUID_OFFSET 0x18 2275 #define 
INIT_PORT_SI_GUID_OFFSET 0x20 2276 2277 mailbox = mlx4_alloc_cmd_mailbox(dev); 2278 if (IS_ERR(mailbox)) 2279 return PTR_ERR(mailbox); 2280 inbox = mailbox->buf; 2281 2282 flags = 0; 2283 flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT; 2284 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; 2285 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); 2286 2287 field = 128 << dev->caps.ib_mtu_cap[port]; 2288 MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); 2289 field = dev->caps.gid_table_len[port]; 2290 MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); 2291 field = dev->caps.pkey_table_len[port]; 2292 MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET); 2293 2294 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT, 2295 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2296 2297 mlx4_free_cmd_mailbox(dev, mailbox); 2298 } else 2299 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2300 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2301 2302 if (!err) 2303 mlx4_hca_core_clock_update(dev); 2304 2305 return err; 2306 } 2307 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT); 2308 2309 int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, 2310 struct mlx4_vhcr *vhcr, 2311 struct mlx4_cmd_mailbox *inbox, 2312 struct mlx4_cmd_mailbox *outbox, 2313 struct mlx4_cmd_info *cmd) 2314 { 2315 struct mlx4_priv *priv = mlx4_priv(dev); 2316 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); 2317 int err; 2318 2319 if (port < 0) 2320 return -EINVAL; 2321 2322 if (!(priv->mfunc.master.slave_state[slave].init_port_mask & 2323 (1 << port))) 2324 return 0; 2325 2326 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { 2327 if (priv->mfunc.master.init_port_ref[port] == 1) { 2328 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2329 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2330 if (err) 2331 return err; 2332 } 2333 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); 2334 } else { 2335 /* infiniband port */ 2336 if (slave == mlx4_master_func_num(dev)) { 2337 if (!priv->mfunc.master.qp0_state[port].qp0_active && 2338 priv->mfunc.master.qp0_state[port].port_active) { 2339 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2340 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2341 if (err) 2342 return err; 2343 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); 2344 priv->mfunc.master.qp0_state[port].port_active = 0; 2345 } 2346 } else 2347 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); 2348 } 2349 --priv->mfunc.master.init_port_ref[port]; 2350 return 0; 2351 } 2352 2353 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) 2354 { 2355 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2356 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2357 } 2358 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); 2359 2360 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) 2361 { 2362 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 2363 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 2364 } 2365 2366 struct mlx4_config_dev { 2367 __be32 update_flags; 2368 __be32 rsvd1[3]; 2369 __be16 vxlan_udp_dport; 2370 __be16 rsvd2; 2371 __be16 roce_v2_entropy; 2372 __be16 roce_v2_udp_dport; 2373 __be32 roce_flags; 2374 __be32 rsvd4[25]; 2375 __be16 rsvd5; 2376 u8 rsvd6; 2377 u8 rx_checksum_val; 2378 }; 2379 2380 #define MLX4_VXLAN_UDP_DPORT (1 << 0) 2381 #define MLX4_ROCE_V2_UDP_DPORT BIT(3) 2382 #define MLX4_DISABLE_RX_PORT BIT(18) 2383 2384 static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) 2385 { 2386 int err; 2387 struct 
mlx4_cmd_mailbox *mailbox; 2388 2389 mailbox = mlx4_alloc_cmd_mailbox(dev); 2390 if (IS_ERR(mailbox)) 2391 return PTR_ERR(mailbox); 2392 2393 memcpy(mailbox->buf, config_dev, sizeof(*config_dev)); 2394 2395 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV, 2396 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2397 2398 mlx4_free_cmd_mailbox(dev, mailbox); 2399 return err; 2400 } 2401 2402 static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) 2403 { 2404 int err; 2405 struct mlx4_cmd_mailbox *mailbox; 2406 2407 mailbox = mlx4_alloc_cmd_mailbox(dev); 2408 if (IS_ERR(mailbox)) 2409 return PTR_ERR(mailbox); 2410 2411 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV, 2412 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2413 2414 if (!err) 2415 memcpy(config_dev, mailbox->buf, sizeof(*config_dev)); 2416 2417 mlx4_free_cmd_mailbox(dev, mailbox); 2418 return err; 2419 } 2420 2421 /* Conversion between the HW values and the actual functionality. 2422 * The value represented by the array index, 2423 * and the functionality determined by the flags. 2424 */ 2425 static const u8 config_dev_csum_flags[] = { 2426 [0] = 0, 2427 [1] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP, 2428 [2] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP | 2429 MLX4_RX_CSUM_MODE_L4, 2430 [3] = MLX4_RX_CSUM_MODE_L4 | 2431 MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP | 2432 MLX4_RX_CSUM_MODE_MULTI_VLAN 2433 }; 2434 2435 int mlx4_config_dev_retrieval(struct mlx4_dev *dev, 2436 struct mlx4_config_dev_params *params) 2437 { 2438 struct mlx4_config_dev config_dev = {0}; 2439 int err; 2440 u8 csum_mask; 2441 2442 #define CONFIG_DEV_RX_CSUM_MODE_MASK 0x7 2443 #define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET 0 2444 #define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET 4 2445 2446 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV)) 2447 return -ENOTSUPP; 2448 2449 err = mlx4_CONFIG_DEV_get(dev, &config_dev); 2450 if (err) 2451 return err; 2452 2453 csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) & 2454 CONFIG_DEV_RX_CSUM_MODE_MASK; 2455 2456 if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0])) 2457 return -EINVAL; 2458 params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask]; 2459 2460 csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) & 2461 CONFIG_DEV_RX_CSUM_MODE_MASK; 2462 2463 if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0])) 2464 return -EINVAL; 2465 params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask]; 2466 2467 params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport); 2468 2469 return 0; 2470 } 2471 EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval); 2472 2473 int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port) 2474 { 2475 struct mlx4_config_dev config_dev; 2476 2477 memset(&config_dev, 0, sizeof(config_dev)); 2478 config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT); 2479 config_dev.vxlan_udp_dport = udp_port; 2480 2481 return mlx4_CONFIG_DEV_set(dev, &config_dev); 2482 } 2483 EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port); 2484 2485 #define CONFIG_DISABLE_RX_PORT BIT(15) 2486 int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis) 2487 { 2488 struct mlx4_config_dev config_dev; 2489 2490 memset(&config_dev, 0, sizeof(config_dev)); 2491 config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT); 2492 if (dis) 2493 config_dev.roce_flags = 2494 cpu_to_be32(CONFIG_DISABLE_RX_PORT); 2495 2496 return mlx4_CONFIG_DEV_set(dev, &config_dev); 
2497 } 2498 2499 int mlx4_config_roce_v2_port(struct mlx4_dev *dev, u16 udp_port) 2500 { 2501 struct mlx4_config_dev config_dev; 2502 2503 memset(&config_dev, 0, sizeof(config_dev)); 2504 config_dev.update_flags = cpu_to_be32(MLX4_ROCE_V2_UDP_DPORT); 2505 config_dev.roce_v2_udp_dport = cpu_to_be16(udp_port); 2506 2507 return mlx4_CONFIG_DEV_set(dev, &config_dev); 2508 } 2509 EXPORT_SYMBOL_GPL(mlx4_config_roce_v2_port); 2510 2511 int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2) 2512 { 2513 struct mlx4_cmd_mailbox *mailbox; 2514 struct { 2515 __be32 v_port1; 2516 __be32 v_port2; 2517 } *v2p; 2518 int err; 2519 2520 mailbox = mlx4_alloc_cmd_mailbox(dev); 2521 if (IS_ERR(mailbox)) 2522 return -ENOMEM; 2523 2524 v2p = mailbox->buf; 2525 v2p->v_port1 = cpu_to_be32(port1); 2526 v2p->v_port2 = cpu_to_be32(port2); 2527 2528 err = mlx4_cmd(dev, mailbox->dma, 0, 2529 MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP, 2530 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2531 2532 mlx4_free_cmd_mailbox(dev, mailbox); 2533 return err; 2534 } 2535 2536 2537 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) 2538 { 2539 int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0, 2540 MLX4_CMD_SET_ICM_SIZE, 2541 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2542 if (ret) 2543 return ret; 2544 2545 /* 2546 * Round up number of system pages needed in case 2547 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. 2548 */ 2549 *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> 2550 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); 2551 2552 return 0; 2553 } 2554 2555 int mlx4_NOP(struct mlx4_dev *dev) 2556 { 2557 /* Input modifier of 0x1f means "finish as soon as possible." */ 2558 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A, 2559 MLX4_CMD_NATIVE); 2560 } 2561 2562 int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier, 2563 const u32 offset[], 2564 u32 value[], size_t array_len, u8 port) 2565 { 2566 struct mlx4_cmd_mailbox *mailbox; 2567 u32 *outbox; 2568 size_t i; 2569 int ret; 2570 2571 mailbox = mlx4_alloc_cmd_mailbox(dev); 2572 if (IS_ERR(mailbox)) 2573 return PTR_ERR(mailbox); 2574 2575 outbox = mailbox->buf; 2576 2577 ret = mlx4_cmd_box(dev, 0, mailbox->dma, port, op_modifier, 2578 MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A, 2579 MLX4_CMD_NATIVE); 2580 if (ret) 2581 goto out; 2582 2583 for (i = 0; i < array_len; i++) { 2584 if (offset[i] > MLX4_MAILBOX_SIZE) { 2585 ret = -EINVAL; 2586 goto out; 2587 } 2588 2589 MLX4_GET(value[i], outbox, offset[i]); 2590 } 2591 2592 out: 2593 mlx4_free_cmd_mailbox(dev, mailbox); 2594 return ret; 2595 } 2596 EXPORT_SYMBOL(mlx4_query_diag_counters); 2597 2598 int mlx4_get_phys_port_id(struct mlx4_dev *dev) 2599 { 2600 u8 port; 2601 u32 *outbox; 2602 struct mlx4_cmd_mailbox *mailbox; 2603 u32 in_mod; 2604 u32 guid_hi, guid_lo; 2605 int err, ret = 0; 2606 #define MOD_STAT_CFG_PORT_OFFSET 8 2607 #define MOD_STAT_CFG_GUID_H 0X14 2608 #define MOD_STAT_CFG_GUID_L 0X1c 2609 2610 mailbox = mlx4_alloc_cmd_mailbox(dev); 2611 if (IS_ERR(mailbox)) 2612 return PTR_ERR(mailbox); 2613 outbox = mailbox->buf; 2614 2615 for (port = 1; port <= dev->caps.num_ports; port++) { 2616 in_mod = port << MOD_STAT_CFG_PORT_OFFSET; 2617 err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2, 2618 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, 2619 MLX4_CMD_NATIVE); 2620 if (err) { 2621 mlx4_err(dev, "Fail to get port %d uplink guid\n", 2622 port); 2623 ret = err; 2624 } else { 2625 MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H); 2626 MLX4_GET(guid_lo, outbox, 
MOD_STAT_CFG_GUID_L); 2627 dev->caps.phys_port_id[port] = (u64)guid_lo | 2628 (u64)guid_hi << 32; 2629 } 2630 } 2631 mlx4_free_cmd_mailbox(dev, mailbox); 2632 return ret; 2633 } 2634 2635 #define MLX4_WOL_SETUP_MODE (5 << 28) 2636 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port) 2637 { 2638 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; 2639 2640 return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3, 2641 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, 2642 MLX4_CMD_NATIVE); 2643 } 2644 EXPORT_SYMBOL_GPL(mlx4_wol_read); 2645 2646 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) 2647 { 2648 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; 2649 2650 return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG, 2651 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2652 } 2653 EXPORT_SYMBOL_GPL(mlx4_wol_write); 2654 2655 enum { 2656 ADD_TO_MCG = 0x26, 2657 }; 2658 2659 2660 void mlx4_opreq_action(struct work_struct *work) 2661 { 2662 struct mlx4_priv *priv = container_of(work, struct mlx4_priv, 2663 opreq_task); 2664 struct mlx4_dev *dev = &priv->dev; 2665 int num_tasks = atomic_read(&priv->opreq_count); 2666 struct mlx4_cmd_mailbox *mailbox; 2667 struct mlx4_mgm *mgm; 2668 u32 *outbox; 2669 u32 modifier; 2670 u16 token; 2671 u16 type; 2672 int err; 2673 u32 num_qps; 2674 struct mlx4_qp qp; 2675 int i; 2676 u8 rem_mcg; 2677 u8 prot; 2678 2679 #define GET_OP_REQ_MODIFIER_OFFSET 0x08 2680 #define GET_OP_REQ_TOKEN_OFFSET 0x14 2681 #define GET_OP_REQ_TYPE_OFFSET 0x1a 2682 #define GET_OP_REQ_DATA_OFFSET 0x20 2683 2684 mailbox = mlx4_alloc_cmd_mailbox(dev); 2685 if (IS_ERR(mailbox)) { 2686 mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n"); 2687 return; 2688 } 2689 outbox = mailbox->buf; 2690 2691 while (num_tasks) { 2692 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, 2693 MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, 2694 MLX4_CMD_NATIVE); 2695 if (err) { 2696 mlx4_err(dev, "Failed to retrieve required operation: %d\n", 2697 err); 2698 return; 2699 } 2700 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); 2701 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); 2702 MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET); 2703 type &= 0xfff; 2704 2705 switch (type) { 2706 case ADD_TO_MCG: 2707 if (dev->caps.steering_mode == 2708 MLX4_STEERING_MODE_DEVICE_MANAGED) { 2709 mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n"); 2710 err = EPERM; 2711 break; 2712 } 2713 mgm = (struct mlx4_mgm *)((u8 *)(outbox) + 2714 GET_OP_REQ_DATA_OFFSET); 2715 num_qps = be32_to_cpu(mgm->members_count) & 2716 MGM_QPN_MASK; 2717 rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1; 2718 prot = ((u8 *)(&mgm->members_count))[0] >> 6; 2719 2720 for (i = 0; i < num_qps; i++) { 2721 qp.qpn = be32_to_cpu(mgm->qp[i]); 2722 if (rem_mcg) 2723 err = mlx4_multicast_detach(dev, &qp, 2724 mgm->gid, 2725 prot, 0); 2726 else 2727 err = mlx4_multicast_attach(dev, &qp, 2728 mgm->gid, 2729 mgm->gid[5] 2730 , 0, prot, 2731 NULL); 2732 if (err) 2733 break; 2734 } 2735 break; 2736 default: 2737 mlx4_warn(dev, "Bad type for required operation\n"); 2738 err = EINVAL; 2739 break; 2740 } 2741 err = mlx4_cmd(dev, 0, ((u32) err | 2742 (__force u32)cpu_to_be32(token) << 16), 2743 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, 2744 MLX4_CMD_NATIVE); 2745 if (err) { 2746 mlx4_err(dev, "Failed to acknowledge required request: %d\n", 2747 err); 2748 goto out; 2749 } 2750 memset(outbox, 0, 0xffc); 2751 num_tasks = atomic_dec_return(&priv->opreq_count); 2752 } 2753 2754 out: 2755 mlx4_free_cmd_mailbox(dev, 
mailbox); 2756 } 2757 2758 static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev, 2759 struct mlx4_cmd_mailbox *mailbox) 2760 { 2761 #define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET 0x10 2762 #define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET 0x20 2763 #define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET 0x40 2764 #define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET 0x70 2765 2766 u32 set_attr_mask, getresp_attr_mask; 2767 u32 trap_attr_mask, traprepress_attr_mask; 2768 2769 MLX4_GET(set_attr_mask, mailbox->buf, 2770 MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET); 2771 mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n", 2772 set_attr_mask); 2773 2774 MLX4_GET(getresp_attr_mask, mailbox->buf, 2775 MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET); 2776 mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n", 2777 getresp_attr_mask); 2778 2779 MLX4_GET(trap_attr_mask, mailbox->buf, 2780 MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET); 2781 mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n", 2782 trap_attr_mask); 2783 2784 MLX4_GET(traprepress_attr_mask, mailbox->buf, 2785 MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET); 2786 mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n", 2787 traprepress_attr_mask); 2788 2789 if (set_attr_mask && getresp_attr_mask && trap_attr_mask && 2790 traprepress_attr_mask) 2791 return 1; 2792 2793 return 0; 2794 } 2795 2796 int mlx4_config_mad_demux(struct mlx4_dev *dev) 2797 { 2798 struct mlx4_cmd_mailbox *mailbox; 2799 int err; 2800 2801 /* Check if mad_demux is supported */ 2802 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX)) 2803 return 0; 2804 2805 mailbox = mlx4_alloc_cmd_mailbox(dev); 2806 if (IS_ERR(mailbox)) { 2807 mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX"); 2808 return -ENOMEM; 2809 } 2810 2811 /* Query mad_demux to find out which MADs are handled by internal sma */ 2812 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */, 2813 MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX, 2814 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2815 if (err) { 2816 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n", 2817 err); 2818 goto out; 2819 } 2820 2821 if (mlx4_check_smp_firewall_active(dev, mailbox)) 2822 dev->flags |= MLX4_FLAG_SECURE_HOST; 2823 2824 /* Config mad_demux to handle all MADs returned by the query above */ 2825 err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */, 2826 MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX, 2827 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2828 if (err) { 2829 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err); 2830 goto out; 2831 } 2832 2833 if (dev->flags & MLX4_FLAG_SECURE_HOST) 2834 mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n"); 2835 out: 2836 mlx4_free_cmd_mailbox(dev, mailbox); 2837 return err; 2838 } 2839 2840 /* Access Reg commands */ 2841 enum mlx4_access_reg_masks { 2842 MLX4_ACCESS_REG_STATUS_MASK = 0x7f, 2843 MLX4_ACCESS_REG_METHOD_MASK = 0x7f, 2844 MLX4_ACCESS_REG_LEN_MASK = 0x7ff 2845 }; 2846 2847 struct mlx4_access_reg { 2848 __be16 constant1; 2849 u8 status; 2850 u8 resrvd1; 2851 __be16 reg_id; 2852 u8 method; 2853 u8 constant2; 2854 __be32 resrvd2[2]; 2855 __be16 len_const; 2856 __be16 resrvd3; 2857 #define MLX4_ACCESS_REG_HEADER_SIZE (20) 2858 u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE]; 2859 } __attribute__((__packed__)); 2860 2861 /** 2862 * mlx4_ACCESS_REG - Generic access reg command. 2863 * @dev: mlx4_dev. 2864 * @reg_id: register ID to access. 2865 * @method: Access method Read/Write. 
2866 * @reg_len: register length to Read/Write in bytes. 2867 * @reg_data: reg_data pointer to Read/Write From/To. 2868 * 2869 * Access ConnectX registers FW command. 2870 * Returns 0 on success and copies outbox mlx4_access_reg data 2871 * field into reg_data or a negative error code. 2872 */ 2873 static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id, 2874 enum mlx4_access_reg_method method, 2875 u16 reg_len, void *reg_data) 2876 { 2877 struct mlx4_cmd_mailbox *inbox, *outbox; 2878 struct mlx4_access_reg *inbuf, *outbuf; 2879 int err; 2880 2881 inbox = mlx4_alloc_cmd_mailbox(dev); 2882 if (IS_ERR(inbox)) 2883 return PTR_ERR(inbox); 2884 2885 outbox = mlx4_alloc_cmd_mailbox(dev); 2886 if (IS_ERR(outbox)) { 2887 mlx4_free_cmd_mailbox(dev, inbox); 2888 return PTR_ERR(outbox); 2889 } 2890 2891 inbuf = inbox->buf; 2892 outbuf = outbox->buf; 2893 2894 inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4); 2895 inbuf->constant2 = 0x1; 2896 inbuf->reg_id = cpu_to_be16(reg_id); 2897 inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK; 2898 2899 reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data))); 2900 inbuf->len_const = 2901 cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) | 2902 ((0x3) << 12)); 2903 2904 memcpy(inbuf->reg_data, reg_data, reg_len); 2905 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0, 2906 MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, 2907 MLX4_CMD_WRAPPED); 2908 if (err) 2909 goto out; 2910 2911 if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) { 2912 err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK; 2913 mlx4_err(dev, 2914 "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n", 2915 reg_id, err); 2916 goto out; 2917 } 2918 2919 memcpy(reg_data, outbuf->reg_data, reg_len); 2920 out: 2921 mlx4_free_cmd_mailbox(dev, inbox); 2922 mlx4_free_cmd_mailbox(dev, outbox); 2923 return err; 2924 } 2925 2926 /* ConnectX registers IDs */ 2927 enum mlx4_reg_id { 2928 MLX4_REG_ID_PTYS = 0x5004, 2929 }; 2930 2931 /** 2932 * mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed) 2933 * register 2934 * @dev: mlx4_dev. 2935 * @method: Access method Read/Write. 2936 * @ptys_reg: PTYS register data pointer. 2937 * 2938 * Access ConnectX PTYS register, to Read/Write Port Type/Speed 2939 * configuration 2940 * Returns 0 on success or a negative error code. 
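 * An illustrative, compiled-out usage sketch is appended at the end of this file.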
2941 */ 2942 int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, 2943 enum mlx4_access_reg_method method, 2944 struct mlx4_ptys_reg *ptys_reg) 2945 { 2946 return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS, 2947 method, sizeof(*ptys_reg), ptys_reg); 2948 } 2949 EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG); 2950 2951 int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave, 2952 struct mlx4_vhcr *vhcr, 2953 struct mlx4_cmd_mailbox *inbox, 2954 struct mlx4_cmd_mailbox *outbox, 2955 struct mlx4_cmd_info *cmd) 2956 { 2957 struct mlx4_access_reg *inbuf = inbox->buf; 2958 u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK; 2959 u16 reg_id = be16_to_cpu(inbuf->reg_id); 2960 2961 if (slave != mlx4_master_func_num(dev) && 2962 method == MLX4_ACCESS_REG_WRITE) 2963 return -EPERM; 2964 2965 if (reg_id == MLX4_REG_ID_PTYS) { 2966 struct mlx4_ptys_reg *ptys_reg = 2967 (struct mlx4_ptys_reg *)inbuf->reg_data; 2968 2969 ptys_reg->local_port = 2970 mlx4_slave_convert_port(dev, slave, 2971 ptys_reg->local_port); 2972 } 2973 2974 return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier, 2975 0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, 2976 MLX4_CMD_NATIVE); 2977 } 2978 2979 static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit) 2980 { 2981 #define SET_PORT_GEN_PHV_VALID 0x10 2982 #define SET_PORT_GEN_PHV_EN 0x80 2983 2984 struct mlx4_cmd_mailbox *mailbox; 2985 struct mlx4_set_port_general_context *context; 2986 u32 in_mod; 2987 int err; 2988 2989 mailbox = mlx4_alloc_cmd_mailbox(dev); 2990 if (IS_ERR(mailbox)) 2991 return PTR_ERR(mailbox); 2992 context = mailbox->buf; 2993 2994 context->v_ignore_fcs |= SET_PORT_GEN_PHV_VALID; 2995 if (phv_bit) 2996 context->phv_en |= SET_PORT_GEN_PHV_EN; 2997 2998 in_mod = MLX4_SET_PORT_GENERAL << 8 | port; 2999 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, 3000 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 3001 MLX4_CMD_NATIVE); 3002 3003 mlx4_free_cmd_mailbox(dev, mailbox); 3004 return err; 3005 } 3006 3007 int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv) 3008 { 3009 int err; 3010 struct mlx4_func_cap func_cap; 3011 3012 memset(&func_cap, 0, sizeof(func_cap)); 3013 err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap); 3014 if (!err) 3015 *phv = func_cap.flags0 & QUERY_FUNC_CAP_PHV_BIT; 3016 return err; 3017 } 3018 EXPORT_SYMBOL(get_phv_bit); 3019 3020 int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val) 3021 { 3022 int ret; 3023 3024 if (mlx4_is_slave(dev)) 3025 return -EPERM; 3026 3027 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN && 3028 !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) { 3029 ret = mlx4_SET_PORT_phv_bit(dev, port, new_val); 3030 if (!ret) 3031 dev->caps.phv_bit[port] = new_val; 3032 return ret; 3033 } 3034 3035 return -EOPNOTSUPP; 3036 } 3037 EXPORT_SYMBOL(set_phv_bit); 3038 3039 void mlx4_replace_zero_macs(struct mlx4_dev *dev) 3040 { 3041 int i; 3042 u8 mac_addr[ETH_ALEN]; 3043 3044 dev->port_random_macs = 0; 3045 for (i = 1; i <= dev->caps.num_ports; ++i) 3046 if (!dev->caps.def_mac[i] && 3047 dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) { 3048 random_ether_addr(mac_addr); 3049 dev->port_random_macs |= 1 << i; 3050 dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr); 3051 } 3052 } 3053 EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs); 3054
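/*
 * Illustrative sketch only (not part of the driver; kept compiled out).
 * It shows one way a caller might read the PTYS register through
 * mlx4_ACCESS_PTYS_REG(). The MLX4_ACCESS_REG_QUERY method value is
 * assumed to be defined alongside the MLX4_ACCESS_REG_WRITE value used
 * above, and only the local_port field of struct mlx4_ptys_reg (also
 * used by the wrapper above) is touched; all other fields stay zeroed.
 */
#if 0
static int example_query_ptys(struct mlx4_dev *dev, u8 port,
			      struct mlx4_ptys_reg *ptys_reg)
{
	int err;

	memset(ptys_reg, 0, sizeof(*ptys_reg));
	/* Select which physical port to query; a read needs nothing else. */
	ptys_reg->local_port = port;

	err = mlx4_ACCESS_PTYS_REG(dev, MLX4_ACCESS_REG_QUERY, ptys_reg);
	if (err)
		return err;

	/* On success the register contents were copied back into *ptys_reg. */
	return 0;
}
#endif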