1 /* 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 35 #define LINUXKPI_PARAM_PREFIX mlx4_ 36 37 #include <linux/etherdevice.h> 38 #include <dev/mlx4/cmd.h> 39 #include <linux/module.h> 40 #include <linux/cache.h> 41 42 #include <net/ipv6.h> 43 44 #include "fw.h" 45 #include "icm.h" 46 47 enum { 48 MLX4_COMMAND_INTERFACE_MIN_REV = 2, 49 MLX4_COMMAND_INTERFACE_MAX_REV = 3, 50 MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS = 3, 51 }; 52 53 extern void __buggy_use_of_MLX4_GET(void); 54 extern void __buggy_use_of_MLX4_PUT(void); 55 56 static bool enable_qos; 57 module_param(enable_qos, bool, 0444); 58 MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)"); 59 60 #define MLX4_GET(dest, source, offset) \ 61 do { \ 62 void *__p = (char *) (source) + (offset); \ 63 typedef struct { u64 value; } __packed u64_p_t; \ 64 u64 val; \ 65 switch (sizeof (dest)) { \ 66 case 1: (dest) = *(u8 *) __p; break; \ 67 case 2: (dest) = be16_to_cpup(__p); break; \ 68 case 4: (dest) = be32_to_cpup(__p); break; \ 69 case 8: val = ((u64_p_t *)__p)->value; \ 70 (dest) = be64_to_cpu(val); break; \ 71 default: __buggy_use_of_MLX4_GET(); \ 72 } \ 73 } while (0) 74 75 #define MLX4_PUT(dest, source, offset) \ 76 do { \ 77 void *__d = ((char *) (dest) + (offset)); \ 78 switch (sizeof(source)) { \ 79 case 1: *(u8 *) __d = (source); break; \ 80 case 2: *(__be16 *) __d = cpu_to_be16(source); break; \ 81 case 4: *(__be32 *) __d = cpu_to_be32(source); break; \ 82 case 8: *(__be64 *) __d = cpu_to_be64(source); break; \ 83 default: __buggy_use_of_MLX4_PUT(); \ 84 } \ 85 } while (0) 86 87 static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags) 88 { 89 static const char *fname[] = { 90 [ 0] = "RC transport", 91 [ 1] = "UC transport", 92 [ 2] = "UD transport", 93 [ 3] = "XRC transport", 94 [ 6] = "SRQ support", 95 [ 7] = "IPoIB checksum offload", 96 [ 8] = "P_Key violation counter", 97 [ 9] = "Q_Key violation counter", 98 [12] = "Dual Port Different Protocol (DPDP) support", 99 [15] = "Big LSO headers", 100 [16] = "MW support", 101 [17] = "APM support", 102 [18] = "Atomic ops support", 103 [19] = "Raw multicast support", 104 [20] = "Address vector port checking support", 105 [21] = "UD multicast support", 106 [30] = "IBoE support", 107 [32] = "Unicast loopback support", 108 [34] = "FCS header control", 109 [37] = "Wake On LAN (port1) support", 110 [38] = "Wake On LAN (port2) support", 111 [40] = "UDP RSS support", 112 [41] = "Unicast VEP steering support", 113 [42] = "Multicast VEP steering support", 114 [48] = "Counters support", 115 [52] = "RSS IP fragments support", 116 [53] = "Port ETS Scheduler support", 117 [55] = "Port link type sensing support", 118 [59] = "Port management change event support", 119 [61] = "64 byte EQE support", 120 [62] = "64 byte CQE support", 121 }; 122 int i; 123 124 mlx4_dbg(dev, "DEV_CAP flags:\n"); 125 for (i = 0; i < ARRAY_SIZE(fname); ++i) 126 if (fname[i] && (flags & (1LL << i))) 127 mlx4_dbg(dev, " %s\n", fname[i]); 128 } 129 130 static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) 131 { 132 static const char * const fname[] = { 133 [0] = "RSS support", 134 [1] = "RSS Toeplitz Hash Function support", 135 [2] = "RSS XOR Hash Function support", 136 [3] = "Device managed flow steering support", 137 [4] = "Automatic MAC reassignment support", 138 [5] = "Time stamping support", 139 [6] = "VST (control vlan insertion/stripping) support", 140 [7] = "FSM (MAC anti-spoofing) support", 141 [8] = "Dynamic QP updates support", 142 [9] = "Device managed flow steering IPoIB support", 143 [10] = "TCP/IP offloads/flow-steering for VXLAN support", 144 [11] = "MAD DEMUX (Secure-Host) support", 145 [12] = "Large cache line (>64B) CQE stride support", 146 [13] = "Large cache line (>64B) EQE stride support", 147 [14] = "Ethernet protocol control support", 148 [15] = "Ethernet Backplane autoneg support", 149 [16] = "CONFIG DEV support", 150 [17] = "Asymmetric EQs support", 151 [18] = "More than 80 VFs support", 152 [19] = "Performance optimized for limited rule configuration flow steering support", 153 [20] = "Recoverable error events support", 154 [21] = "Port Remap support", 155 [22] = "QCN support", 156 [23] = "QP rate limiting support", 157 [24] = "Ethernet Flow control statistics support", 158 [25] = "Granular QoS per VF support", 159 [26] = "Port ETS Scheduler support", 160 [27] = "Port beacon support", 161 [28] = "RX-ALL support", 162 [29] = "802.1ad offload support", 163 [31] = "Modifying loopback source checks using UPDATE_QP support", 164 [32] = "Loopback source checks support", 165 [33] = "RoCEv2 support", 166 [34] = "DMFS Sniffer support (UC & MC)", 167 [35] = "QinQ VST mode support", 168 [36] = "sl to vl mapping table change event support" 169 }; 170 int i; 171 172 for (i = 0; i < ARRAY_SIZE(fname); ++i) 173 if (fname[i] && (flags & (1LL << i))) 174 mlx4_dbg(dev, " %s\n", fname[i]); 175 } 176 177 int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg) 178 { 179 struct mlx4_cmd_mailbox *mailbox; 180 u32 *inbox; 181 int err = 0; 182 183 #define MOD_STAT_CFG_IN_SIZE 0x100 184 185 #define MOD_STAT_CFG_PG_SZ_M_OFFSET 0x002 186 #define MOD_STAT_CFG_PG_SZ_OFFSET 0x003 187 188 mailbox = mlx4_alloc_cmd_mailbox(dev); 189 if (IS_ERR(mailbox)) 190 return PTR_ERR(mailbox); 191 inbox = mailbox->buf; 192 193 MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET); 194 MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET); 195 196 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG, 197 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 198 199 mlx4_free_cmd_mailbox(dev, mailbox); 200 return err; 201 } 202 203 int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave) 204 { 205 struct mlx4_cmd_mailbox *mailbox; 206 u32 *outbox; 207 u8 in_modifier; 208 u8 field; 209 u16 field16; 210 int err; 211 212 #define QUERY_FUNC_BUS_OFFSET 0x00 213 #define QUERY_FUNC_DEVICE_OFFSET 0x01 214 #define QUERY_FUNC_FUNCTION_OFFSET 0x01 215 #define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET 0x03 216 #define QUERY_FUNC_RSVD_EQS_OFFSET 0x04 217 #define QUERY_FUNC_MAX_EQ_OFFSET 0x06 218 #define QUERY_FUNC_RSVD_UARS_OFFSET 0x0b 219 220 mailbox = mlx4_alloc_cmd_mailbox(dev); 221 if (IS_ERR(mailbox)) 222 return PTR_ERR(mailbox); 223 outbox = mailbox->buf; 224 225 in_modifier = slave; 226 227 err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0, 228 MLX4_CMD_QUERY_FUNC, 229 MLX4_CMD_TIME_CLASS_A, 230 MLX4_CMD_NATIVE); 231 if (err) 232 goto out; 233 234 MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET); 235 func->bus = field & 0xf; 236 MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET); 237 func->device = field & 0xf1; 238 MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET); 239 func->function = field & 0x7; 240 MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET); 241 func->physical_function = field & 0xf; 242 MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET); 243 func->rsvd_eqs = field16 & 0xffff; 244 MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET); 245 func->max_eq = field16 & 0xffff; 246 MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET); 247 func->rsvd_uars = field & 0x0f; 248 249 mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n", 250 func->bus, func->device, func->function, func->physical_function, 251 func->max_eq, func->rsvd_eqs, func->rsvd_uars); 252 253 out: 254 mlx4_free_cmd_mailbox(dev, mailbox); 255 return err; 256 } 257 258 static int mlx4_activate_vst_qinq(struct mlx4_priv *priv, int slave, int port) 259 { 260 struct mlx4_vport_oper_state *vp_oper; 261 struct mlx4_vport_state *vp_admin; 262 int err; 263 264 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; 265 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; 266 267 if (vp_admin->default_vlan != vp_oper->state.default_vlan) { 268 err = __mlx4_register_vlan(&priv->dev, port, 269 vp_admin->default_vlan, 270 &vp_oper->vlan_idx); 271 if (err) { 272 vp_oper->vlan_idx = NO_INDX; 273 mlx4_warn(&priv->dev, 274 "No vlan resources slave %d, port %d\n", 275 slave, port); 276 return err; 277 } 278 mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n", 279 (int)(vp_oper->state.default_vlan), 280 vp_oper->vlan_idx, slave, port); 281 } 282 vp_oper->state.vlan_proto = vp_admin->vlan_proto; 283 vp_oper->state.default_vlan = vp_admin->default_vlan; 284 vp_oper->state.default_qos = vp_admin->default_qos; 285 286 return 0; 287 } 288 289 static int mlx4_handle_vst_qinq(struct mlx4_priv *priv, int slave, int port) 290 { 291 struct mlx4_vport_oper_state *vp_oper; 292 struct mlx4_slave_state *slave_state; 293 struct mlx4_vport_state *vp_admin; 294 int err; 295 296 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; 297 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; 298 slave_state = &priv->mfunc.master.slave_state[slave]; 299 300 if ((vp_admin->vlan_proto != htons(ETH_P_8021AD)) || 301 (!slave_state->active)) 302 return 0; 303 304 if (vp_oper->state.vlan_proto == vp_admin->vlan_proto && 305 vp_oper->state.default_vlan == vp_admin->default_vlan && 306 vp_oper->state.default_qos == vp_admin->default_qos) 307 return 0; 308 309 if (!slave_state->vst_qinq_supported) { 310 /* Warn and revert the request to set vst QinQ mode */ 311 vp_admin->vlan_proto = vp_oper->state.vlan_proto; 312 vp_admin->default_vlan = vp_oper->state.default_vlan; 313 vp_admin->default_qos = vp_oper->state.default_qos; 314 315 mlx4_warn(&priv->dev, 316 "Slave %d does not support VST QinQ mode\n", slave); 317 return 0; 318 } 319 320 err = mlx4_activate_vst_qinq(priv, slave, port); 321 return err; 322 } 323 324 int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, 325 struct mlx4_vhcr *vhcr, 326 struct mlx4_cmd_mailbox *inbox, 327 struct mlx4_cmd_mailbox *outbox, 328 struct mlx4_cmd_info *cmd) 329 { 330 struct mlx4_priv *priv = mlx4_priv(dev); 331 u8 field, port; 332 u32 size, proxy_qp, qkey; 333 int err = 0; 334 struct mlx4_func func; 335 336 #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 337 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 338 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 339 #define QUERY_FUNC_CAP_FMR_OFFSET 0x8 340 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10 341 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 0x14 342 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18 343 #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20 344 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24 345 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28 346 #define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c 347 #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30 348 #define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET 0x48 349 350 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50 351 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54 352 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58 353 #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60 354 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64 355 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68 356 357 #define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET 0x6c 358 359 #define QUERY_FUNC_CAP_FMR_FLAG 0x80 360 #define QUERY_FUNC_CAP_FLAG_RDMA 0x40 361 #define QUERY_FUNC_CAP_FLAG_ETH 0x80 362 #define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10 363 #define QUERY_FUNC_CAP_FLAG_RESD_LKEY 0x08 364 #define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04 365 366 #define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31) 367 #define QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG (1UL << 30) 368 369 /* when opcode modifier = 1 */ 370 #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 371 #define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET 0x4 372 #define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8 373 #define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc 374 375 #define QUERY_FUNC_CAP_QP0_TUNNEL 0x10 376 #define QUERY_FUNC_CAP_QP0_PROXY 0x14 377 #define QUERY_FUNC_CAP_QP1_TUNNEL 0x18 378 #define QUERY_FUNC_CAP_QP1_PROXY 0x1c 379 #define QUERY_FUNC_CAP_PHYS_PORT_ID 0x28 380 381 #define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC 0x40 382 #define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN 0x80 383 #define QUERY_FUNC_CAP_FLAGS1_NIC_INFO 0x10 384 #define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08 385 386 #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80 387 #define QUERY_FUNC_CAP_PHV_BIT 0x40 388 #define QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE 0x20 389 390 #define QUERY_FUNC_CAP_SUPPORTS_VST_QINQ BIT(30) 391 #define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS BIT(31) 392 393 if (vhcr->op_modifier == 1) { 394 struct mlx4_active_ports actv_ports = 395 mlx4_get_active_ports(dev, slave); 396 int converted_port = mlx4_slave_convert_port( 397 dev, slave, vhcr->in_modifier); 398 struct mlx4_vport_oper_state *vp_oper; 399 400 if (converted_port < 0) 401 return -EINVAL; 402 403 vhcr->in_modifier = converted_port; 404 /* phys-port = logical-port */ 405 field = vhcr->in_modifier - 406 find_first_bit(actv_ports.ports, dev->caps.num_ports); 407 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); 408 409 port = vhcr->in_modifier; 410 proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1; 411 412 /* Set nic_info bit to mark new fields support */ 413 field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO; 414 415 if (mlx4_vf_smi_enabled(dev, slave, port) && 416 !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) { 417 field |= QUERY_FUNC_CAP_VF_ENABLE_QP0; 418 MLX4_PUT(outbox->buf, qkey, 419 QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET); 420 } 421 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET); 422 423 /* size is now the QP number */ 424 size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1; 425 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL); 426 427 size += 2; 428 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL); 429 430 MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY); 431 proxy_qp += 2; 432 MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY); 433 434 MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier], 435 QUERY_FUNC_CAP_PHYS_PORT_ID); 436 437 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; 438 err = mlx4_handle_vst_qinq(priv, slave, port); 439 if (err) 440 return err; 441 442 field = 0; 443 if (dev->caps.phv_bit[port]) 444 field |= QUERY_FUNC_CAP_PHV_BIT; 445 if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) 446 field |= QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE; 447 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS0_OFFSET); 448 449 } else if (vhcr->op_modifier == 0) { 450 struct mlx4_active_ports actv_ports = 451 mlx4_get_active_ports(dev, slave); 452 struct mlx4_slave_state *slave_state = 453 &priv->mfunc.master.slave_state[slave]; 454 455 /* enable rdma and ethernet interfaces, new quota locations, 456 * and reserved lkey 457 */ 458 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA | 459 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX | 460 QUERY_FUNC_CAP_FLAG_RESD_LKEY); 461 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); 462 463 field = min( 464 bitmap_weight(actv_ports.ports, dev->caps.num_ports), 465 dev->caps.num_ports); 466 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); 467 468 size = dev->caps.function_caps; /* set PF behaviours */ 469 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET); 470 471 field = 0; /* protected FMR support not available as yet */ 472 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET); 473 474 size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave]; 475 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); 476 size = dev->caps.num_qps; 477 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP); 478 479 size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave]; 480 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); 481 size = dev->caps.num_srqs; 482 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP); 483 484 size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave]; 485 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); 486 size = dev->caps.num_cqs; 487 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP); 488 489 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) || 490 mlx4_QUERY_FUNC(dev, &func, slave)) { 491 size = vhcr->in_modifier & 492 QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ? 493 dev->caps.num_eqs : 494 rounddown_pow_of_two(dev->caps.num_eqs); 495 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); 496 size = dev->caps.reserved_eqs; 497 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 498 } else { 499 size = vhcr->in_modifier & 500 QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ? 501 func.max_eq : 502 rounddown_pow_of_two(func.max_eq); 503 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); 504 size = func.rsvd_eqs; 505 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 506 } 507 508 size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave]; 509 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); 510 size = dev->caps.num_mpts; 511 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP); 512 513 size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave]; 514 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); 515 size = dev->caps.num_mtts; 516 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP); 517 518 size = dev->caps.num_mgms + dev->caps.num_amgms; 519 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); 520 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP); 521 522 size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG | 523 QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG; 524 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET); 525 526 size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00); 527 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET); 528 529 if (vhcr->in_modifier & QUERY_FUNC_CAP_SUPPORTS_VST_QINQ) 530 slave_state->vst_qinq_supported = true; 531 532 } else 533 err = -EINVAL; 534 535 return err; 536 } 537 538 int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, 539 struct mlx4_func_cap *func_cap) 540 { 541 struct mlx4_cmd_mailbox *mailbox; 542 u32 *outbox; 543 u8 field, op_modifier; 544 u32 size, qkey; 545 int err = 0, quotas = 0; 546 u32 in_modifier; 547 u32 slave_caps; 548 549 op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */ 550 slave_caps = QUERY_FUNC_CAP_SUPPORTS_VST_QINQ | 551 QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS; 552 in_modifier = op_modifier ? gen_or_port : slave_caps; 553 554 mailbox = mlx4_alloc_cmd_mailbox(dev); 555 if (IS_ERR(mailbox)) 556 return PTR_ERR(mailbox); 557 558 err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier, 559 MLX4_CMD_QUERY_FUNC_CAP, 560 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 561 if (err) 562 goto out; 563 564 outbox = mailbox->buf; 565 566 if (!op_modifier) { 567 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET); 568 if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) { 569 mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n"); 570 err = -EPROTONOSUPPORT; 571 goto out; 572 } 573 func_cap->flags = field; 574 quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS); 575 576 MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); 577 func_cap->num_ports = field; 578 579 MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET); 580 func_cap->pf_context_behaviour = size; 581 582 if (quotas) { 583 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); 584 func_cap->qp_quota = size & 0xFFFFFF; 585 586 MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); 587 func_cap->srq_quota = size & 0xFFFFFF; 588 589 MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); 590 func_cap->cq_quota = size & 0xFFFFFF; 591 592 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); 593 func_cap->mpt_quota = size & 0xFFFFFF; 594 595 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); 596 func_cap->mtt_quota = size & 0xFFFFFF; 597 598 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); 599 func_cap->mcg_quota = size & 0xFFFFFF; 600 601 } else { 602 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP); 603 func_cap->qp_quota = size & 0xFFFFFF; 604 605 MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP); 606 func_cap->srq_quota = size & 0xFFFFFF; 607 608 MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP); 609 func_cap->cq_quota = size & 0xFFFFFF; 610 611 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP); 612 func_cap->mpt_quota = size & 0xFFFFFF; 613 614 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP); 615 func_cap->mtt_quota = size & 0xFFFFFF; 616 617 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP); 618 func_cap->mcg_quota = size & 0xFFFFFF; 619 } 620 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET); 621 func_cap->max_eq = size & 0xFFFFFF; 622 623 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 624 func_cap->reserved_eq = size & 0xFFFFFF; 625 626 if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) { 627 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET); 628 func_cap->reserved_lkey = size; 629 } else { 630 func_cap->reserved_lkey = 0; 631 } 632 633 func_cap->extra_flags = 0; 634 635 /* Mailbox data from 0x6c and onward should only be treated if 636 * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags 637 */ 638 if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) { 639 MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET); 640 if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG) 641 func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP; 642 if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG) 643 func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_A0_RES_QP; 644 } 645 646 goto out; 647 } 648 649 /* logical port query */ 650 if (gen_or_port > dev->caps.num_ports) { 651 err = -EINVAL; 652 goto out; 653 } 654 655 MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET); 656 if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) { 657 if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) { 658 mlx4_err(dev, "VLAN is enforced on this port\n"); 659 err = -EPROTONOSUPPORT; 660 goto out; 661 } 662 663 if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) { 664 mlx4_err(dev, "Force mac is enabled on this port\n"); 665 err = -EPROTONOSUPPORT; 666 goto out; 667 } 668 } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) { 669 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET); 670 if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) { 671 mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n"); 672 err = -EPROTONOSUPPORT; 673 goto out; 674 } 675 } 676 677 MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); 678 func_cap->physical_port = field; 679 if (func_cap->physical_port != gen_or_port) { 680 err = -ENOSYS; 681 goto out; 682 } 683 684 if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) { 685 MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET); 686 func_cap->qp0_qkey = qkey; 687 } else { 688 func_cap->qp0_qkey = 0; 689 } 690 691 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL); 692 func_cap->qp0_tunnel_qpn = size & 0xFFFFFF; 693 694 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY); 695 func_cap->qp0_proxy_qpn = size & 0xFFFFFF; 696 697 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL); 698 func_cap->qp1_tunnel_qpn = size & 0xFFFFFF; 699 700 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY); 701 func_cap->qp1_proxy_qpn = size & 0xFFFFFF; 702 703 if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO) 704 MLX4_GET(func_cap->phys_port_id, outbox, 705 QUERY_FUNC_CAP_PHYS_PORT_ID); 706 707 MLX4_GET(func_cap->flags0, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET); 708 709 /* All other resources are allocated by the master, but we still report 710 * 'num' and 'reserved' capabilities as follows: 711 * - num remains the maximum resource index 712 * - 'num - reserved' is the total available objects of a resource, but 713 * resource indices may be less than 'reserved' 714 * TODO: set per-resource quotas */ 715 716 out: 717 mlx4_free_cmd_mailbox(dev, mailbox); 718 719 return err; 720 } 721 722 static void disable_unsupported_roce_caps(void *buf); 723 724 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 725 { 726 struct mlx4_cmd_mailbox *mailbox; 727 u32 *outbox; 728 u8 field; 729 u32 field32, flags, ext_flags; 730 u16 size; 731 u16 stat_rate; 732 int err; 733 int i; 734 735 #define QUERY_DEV_CAP_OUT_SIZE 0x100 736 #define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10 737 #define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11 738 #define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12 739 #define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13 740 #define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14 741 #define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15 742 #define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16 743 #define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17 744 #define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19 745 #define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a 746 #define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b 747 #define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d 748 #define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e 749 #define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f 750 #define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20 751 #define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21 752 #define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22 753 #define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23 754 #define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET 0x26 755 #define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27 756 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29 757 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b 758 #define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d 759 #define QUERY_DEV_CAP_RSS_OFFSET 0x2e 760 #define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f 761 #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33 762 #define QUERY_DEV_CAP_PORT_BEACON_OFFSET 0x34 763 #define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35 764 #define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36 765 #define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37 766 #define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38 767 #define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b 768 #define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c 769 #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e 770 #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f 771 #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 772 #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 773 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 774 #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 775 #define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b 776 #define QUERY_DEV_CAP_BF_OFFSET 0x4c 777 #define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d 778 #define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e 779 #define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f 780 #define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51 781 #define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52 782 #define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55 783 #define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56 784 #define QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET 0x5D 785 #define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61 786 #define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62 787 #define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63 788 #define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64 789 #define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65 790 #define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66 791 #define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67 792 #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68 793 #define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET 0x70 794 #define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70 795 #define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74 796 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 797 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 798 #define QUERY_DEV_CAP_SL2VL_EVENT_OFFSET 0x78 799 #define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a 800 #define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET 0x7b 801 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 802 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 803 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 804 #define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86 805 #define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88 806 #define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a 807 #define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c 808 #define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e 809 #define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90 810 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92 811 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94 812 #define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94 813 #define QUERY_DEV_CAP_PHV_EN_OFFSET 0x96 814 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 815 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 816 #define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c 817 #define QUERY_DEV_CAP_DIAG_RPRT_PER_PORT 0x9c 818 #define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d 819 #define QUERY_DEV_CAP_VXLAN 0x9e 820 #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0 821 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8 822 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac 823 #define QUERY_DEV_CAP_MAP_CLOCK_TO_USER 0xc1 824 #define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc 825 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0 826 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2 827 828 829 dev_cap->flags2 = 0; 830 mailbox = mlx4_alloc_cmd_mailbox(dev); 831 if (IS_ERR(mailbox)) 832 return PTR_ERR(mailbox); 833 outbox = mailbox->buf; 834 835 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, 836 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 837 if (err) 838 goto out; 839 840 if (mlx4_is_mfunc(dev)) 841 disable_unsupported_roce_caps(outbox); 842 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAP_CLOCK_TO_USER); 843 dev_cap->map_clock_to_user = field & 0x80; 844 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET); 845 dev_cap->reserved_qps = 1 << (field & 0xf); 846 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET); 847 dev_cap->max_qps = 1 << (field & 0x1f); 848 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET); 849 dev_cap->reserved_srqs = 1 << (field >> 4); 850 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET); 851 dev_cap->max_srqs = 1 << (field & 0x1f); 852 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET); 853 dev_cap->max_cq_sz = 1 << field; 854 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET); 855 dev_cap->reserved_cqs = 1 << (field & 0xf); 856 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET); 857 dev_cap->max_cqs = 1 << (field & 0x1f); 858 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET); 859 dev_cap->max_mpts = 1 << (field & 0x3f); 860 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET); 861 dev_cap->reserved_eqs = 1 << (field & 0xf); 862 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET); 863 dev_cap->max_eqs = 1 << (field & 0xf); 864 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET); 865 dev_cap->reserved_mtts = 1 << (field >> 4); 866 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET); 867 dev_cap->reserved_mrws = 1 << (field & 0xf); 868 MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET); 869 dev_cap->num_sys_eqs = size & 0xfff; 870 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET); 871 dev_cap->max_requester_per_qp = 1 << (field & 0x3f); 872 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET); 873 dev_cap->max_responder_per_qp = 1 << (field & 0x3f); 874 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET); 875 field &= 0x1f; 876 if (!field) 877 dev_cap->max_gso_sz = 0; 878 else 879 dev_cap->max_gso_sz = 1 << field; 880 881 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET); 882 if (field & 0x20) 883 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR; 884 if (field & 0x10) 885 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP; 886 field &= 0xf; 887 if (field) { 888 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS; 889 dev_cap->max_rss_tbl_sz = 1 << field; 890 } else 891 dev_cap->max_rss_tbl_sz = 0; 892 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET); 893 dev_cap->max_rdma_global = 1 << (field & 0x3f); 894 MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET); 895 dev_cap->local_ca_ack_delay = field & 0x1f; 896 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); 897 dev_cap->num_ports = field & 0xf; 898 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET); 899 dev_cap->max_msg_sz = 1 << (field & 0x1f); 900 MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET); 901 if (field & 0x10) 902 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN; 903 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); 904 if (field & 0x80) 905 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN; 906 dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f; 907 if (field & 0x20) 908 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER; 909 MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET); 910 if (field & 0x80) 911 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON; 912 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); 913 if (field & 0x80) 914 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB; 915 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET); 916 dev_cap->fs_max_num_qp_per_entry = field; 917 MLX4_GET(field, outbox, QUERY_DEV_CAP_SL2VL_EVENT_OFFSET); 918 if (field & (1 << 5)) 919 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT; 920 MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); 921 if (field & 0x1) 922 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN; 923 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); 924 dev_cap->stat_rate_support = stat_rate; 925 MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); 926 if (field & 0x80) 927 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS; 928 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 929 MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); 930 dev_cap->flags = flags | (u64)ext_flags << 32; 931 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); 932 dev_cap->reserved_uars = field >> 4; 933 MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); 934 dev_cap->uar_size = 1 << ((field & 0x3f) + 20); 935 MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET); 936 dev_cap->min_page_sz = 1 << field; 937 938 MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET); 939 if (field & 0x80) { 940 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); 941 dev_cap->bf_reg_size = 1 << (field & 0x1f); 942 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); 943 if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) 944 field = 3; 945 dev_cap->bf_regs_per_page = 1 << (field & 0x3f); 946 } else { 947 dev_cap->bf_reg_size = 0; 948 } 949 950 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET); 951 dev_cap->max_sq_sg = field; 952 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET); 953 dev_cap->max_sq_desc_sz = size; 954 955 MLX4_GET(field, outbox, QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET); 956 if (field & 0x1) 957 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP; 958 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET); 959 dev_cap->max_qp_per_mcg = 1 << field; 960 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET); 961 dev_cap->reserved_mgms = field & 0xf; 962 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET); 963 dev_cap->max_mcgs = 1 << field; 964 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET); 965 dev_cap->reserved_pds = field >> 4; 966 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET); 967 dev_cap->max_pds = 1 << (field & 0x3f); 968 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET); 969 dev_cap->reserved_xrcds = field >> 4; 970 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET); 971 dev_cap->max_xrcds = 1 << (field & 0x1f); 972 973 MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET); 974 dev_cap->rdmarc_entry_sz = size; 975 MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET); 976 dev_cap->qpc_entry_sz = size; 977 MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET); 978 dev_cap->aux_entry_sz = size; 979 MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET); 980 dev_cap->altc_entry_sz = size; 981 MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET); 982 dev_cap->eqc_entry_sz = size; 983 MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET); 984 dev_cap->cqc_entry_sz = size; 985 MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET); 986 dev_cap->srq_entry_sz = size; 987 MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET); 988 dev_cap->cmpt_entry_sz = size; 989 MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET); 990 dev_cap->mtt_entry_sz = size; 991 MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET); 992 dev_cap->dmpt_entry_sz = size; 993 994 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET); 995 dev_cap->max_srq_sz = 1 << field; 996 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET); 997 dev_cap->max_qp_sz = 1 << field; 998 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET); 999 dev_cap->resize_srq = field & 1; 1000 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET); 1001 dev_cap->max_rq_sg = field; 1002 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); 1003 dev_cap->max_rq_desc_sz = size; 1004 MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); 1005 if (field & (1 << 4)) 1006 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QOS_VPP; 1007 if (field & (1 << 5)) 1008 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL; 1009 if (field & (1 << 6)) 1010 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 1011 if (field & (1 << 7)) 1012 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 1013 MLX4_GET(dev_cap->bmme_flags, outbox, 1014 QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 1015 if (dev_cap->bmme_flags & MLX4_FLAG_ROCE_V1_V2) 1016 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ROCE_V1_V2; 1017 if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP) 1018 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP; 1019 MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); 1020 if (field & 0x20) 1021 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV; 1022 if (field & (1 << 2)) 1023 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS; 1024 MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET); 1025 if (field & 0x80) 1026 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN; 1027 if (field & 0x40) 1028 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN; 1029 1030 MLX4_GET(dev_cap->reserved_lkey, outbox, 1031 QUERY_DEV_CAP_RSVD_LKEY_OFFSET); 1032 MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET); 1033 if (field32 & (1 << 0)) 1034 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP; 1035 if (field32 & (1 << 7)) 1036 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT; 1037 MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT); 1038 if (field32 & (1 << 17)) 1039 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT; 1040 MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); 1041 if (field & (1 << 6)) 1042 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN; 1043 MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN); 1044 if (field & (1 << 3)) 1045 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS; 1046 if (field & (1 << 5)) 1047 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG; 1048 MLX4_GET(dev_cap->max_icm_sz, outbox, 1049 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET); 1050 if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS) 1051 MLX4_GET(dev_cap->max_counters, outbox, 1052 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET); 1053 1054 MLX4_GET(field32, outbox, 1055 QUERY_DEV_CAP_MAD_DEMUX_OFFSET); 1056 if (field32 & (1 << 0)) 1057 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX; 1058 1059 MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox, 1060 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET); 1061 dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK; 1062 MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox, 1063 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET); 1064 dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK; 1065 1066 MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET); 1067 dev_cap->rl_caps.num_rates = size; 1068 if (dev_cap->rl_caps.num_rates) { 1069 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT; 1070 MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET); 1071 dev_cap->rl_caps.max_val = size & 0xfff; 1072 dev_cap->rl_caps.max_unit = size >> 14; 1073 MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET); 1074 dev_cap->rl_caps.min_val = size & 0xfff; 1075 dev_cap->rl_caps.min_unit = size >> 14; 1076 } 1077 1078 MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); 1079 if (field32 & (1 << 16)) 1080 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP; 1081 if (field32 & (1 << 18)) 1082 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB; 1083 if (field32 & (1 << 19)) 1084 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_LB_SRC_CHK; 1085 if (field32 & (1 << 26)) 1086 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL; 1087 if (field32 & (1 << 20)) 1088 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM; 1089 if (field32 & (1 << 21)) 1090 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS; 1091 1092 for (i = 1; i <= dev_cap->num_ports; i++) { 1093 err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i); 1094 if (err) 1095 goto out; 1096 } 1097 1098 /* 1099 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then 1100 * we can't use any EQs whose doorbell falls on that page, 1101 * even if the EQ itself isn't reserved. 1102 */ 1103 if (dev_cap->num_sys_eqs == 0) 1104 dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4, 1105 dev_cap->reserved_eqs); 1106 else 1107 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS; 1108 1109 out: 1110 mlx4_free_cmd_mailbox(dev, mailbox); 1111 return err; 1112 } 1113 1114 void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 1115 { 1116 if (dev_cap->bf_reg_size > 0) 1117 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", 1118 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); 1119 else 1120 mlx4_dbg(dev, "BlueFlame not available\n"); 1121 1122 mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n", 1123 dev_cap->bmme_flags, dev_cap->reserved_lkey); 1124 mlx4_dbg(dev, "Max ICM size %lld MB\n", 1125 (unsigned long long) dev_cap->max_icm_sz >> 20); 1126 mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", 1127 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz); 1128 mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", 1129 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz); 1130 mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", 1131 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz); 1132 mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n", 1133 dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs, 1134 dev_cap->eqc_entry_sz); 1135 mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", 1136 dev_cap->reserved_mrws, dev_cap->reserved_mtts); 1137 mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", 1138 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars); 1139 mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n", 1140 dev_cap->max_pds, dev_cap->reserved_mgms); 1141 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", 1142 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); 1143 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", 1144 dev_cap->local_ca_ack_delay, 128 << dev_cap->port_cap[1].ib_mtu, 1145 dev_cap->port_cap[1].max_port_width); 1146 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", 1147 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); 1148 mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n", 1149 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg); 1150 mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz); 1151 mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters); 1152 mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz); 1153 mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n", 1154 dev_cap->dmfs_high_rate_qpn_base); 1155 mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n", 1156 dev_cap->dmfs_high_rate_qpn_range); 1157 1158 if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT) { 1159 struct mlx4_rate_limit_caps *rl_caps = &dev_cap->rl_caps; 1160 1161 mlx4_dbg(dev, "QP Rate-Limit: #rates %d, unit/val max %d/%d, min %d/%d\n", 1162 rl_caps->num_rates, rl_caps->max_unit, rl_caps->max_val, 1163 rl_caps->min_unit, rl_caps->min_val); 1164 } 1165 1166 dump_dev_cap_flags(dev, dev_cap->flags); 1167 dump_dev_cap_flags2(dev, dev_cap->flags2); 1168 } 1169 1170 int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap) 1171 { 1172 struct mlx4_cmd_mailbox *mailbox; 1173 u32 *outbox; 1174 u8 field; 1175 u32 field32; 1176 int err; 1177 1178 mailbox = mlx4_alloc_cmd_mailbox(dev); 1179 if (IS_ERR(mailbox)) 1180 return PTR_ERR(mailbox); 1181 outbox = mailbox->buf; 1182 1183 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 1184 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, 1185 MLX4_CMD_TIME_CLASS_A, 1186 MLX4_CMD_NATIVE); 1187 1188 if (err) 1189 goto out; 1190 1191 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); 1192 port_cap->max_vl = field >> 4; 1193 MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); 1194 port_cap->ib_mtu = field >> 4; 1195 port_cap->max_port_width = field & 0xf; 1196 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); 1197 port_cap->max_gids = 1 << (field & 0xf); 1198 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET); 1199 port_cap->max_pkeys = 1 << (field & 0xf); 1200 } else { 1201 #define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00 1202 #define QUERY_PORT_MTU_OFFSET 0x01 1203 #define QUERY_PORT_ETH_MTU_OFFSET 0x02 1204 #define QUERY_PORT_WIDTH_OFFSET 0x06 1205 #define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 1206 #define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a 1207 #define QUERY_PORT_MAX_VL_OFFSET 0x0b 1208 #define QUERY_PORT_MAC_OFFSET 0x10 1209 #define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18 1210 #define QUERY_PORT_WAVELENGTH_OFFSET 0x1c 1211 #define QUERY_PORT_TRANS_CODE_OFFSET 0x20 1212 1213 err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT, 1214 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 1215 if (err) 1216 goto out; 1217 1218 MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET); 1219 port_cap->link_state = (field & 0x80) >> 7; 1220 port_cap->supported_port_types = field & 3; 1221 port_cap->suggested_type = (field >> 3) & 1; 1222 port_cap->default_sense = (field >> 4) & 1; 1223 port_cap->dmfs_optimized_state = (field >> 5) & 1; 1224 MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); 1225 port_cap->ib_mtu = field & 0xf; 1226 MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); 1227 port_cap->max_port_width = field & 0xf; 1228 MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); 1229 port_cap->max_gids = 1 << (field >> 4); 1230 port_cap->max_pkeys = 1 << (field & 0xf); 1231 MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); 1232 port_cap->max_vl = field & 0xf; 1233 port_cap->max_tc_eth = field >> 4; 1234 MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET); 1235 port_cap->log_max_macs = field & 0xf; 1236 port_cap->log_max_vlans = field >> 4; 1237 MLX4_GET(port_cap->eth_mtu, outbox, QUERY_PORT_ETH_MTU_OFFSET); 1238 MLX4_GET(port_cap->def_mac, outbox, QUERY_PORT_MAC_OFFSET); 1239 MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET); 1240 port_cap->trans_type = field32 >> 24; 1241 port_cap->vendor_oui = field32 & 0xffffff; 1242 MLX4_GET(port_cap->wavelength, outbox, QUERY_PORT_WAVELENGTH_OFFSET); 1243 MLX4_GET(port_cap->trans_code, outbox, QUERY_PORT_TRANS_CODE_OFFSET); 1244 } 1245 1246 out: 1247 mlx4_free_cmd_mailbox(dev, mailbox); 1248 return err; 1249 } 1250 1251 #define DEV_CAP_EXT_2_FLAG_PFC_COUNTERS (1 << 28) 1252 #define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL (1 << 26) 1253 #define DEV_CAP_EXT_2_FLAG_80_VFS (1 << 21) 1254 #define DEV_CAP_EXT_2_FLAG_FSM (1 << 20) 1255 1256 int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, 1257 struct mlx4_vhcr *vhcr, 1258 struct mlx4_cmd_mailbox *inbox, 1259 struct mlx4_cmd_mailbox *outbox, 1260 struct mlx4_cmd_info *cmd) 1261 { 1262 u64 flags; 1263 int err = 0; 1264 u8 field; 1265 u16 field16; 1266 u32 bmme_flags, field32; 1267 int real_port; 1268 int slave_port; 1269 int first_port; 1270 struct mlx4_active_ports actv_ports; 1271 1272 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, 1273 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1274 if (err) 1275 return err; 1276 1277 disable_unsupported_roce_caps(outbox->buf); 1278 /* add port mng change event capability and disable mw type 1 1279 * unconditionally to slaves 1280 */ 1281 MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 1282 flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV; 1283 flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW; 1284 actv_ports = mlx4_get_active_ports(dev, slave); 1285 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports); 1286 for (slave_port = 0, real_port = first_port; 1287 real_port < first_port + 1288 bitmap_weight(actv_ports.ports, dev->caps.num_ports); 1289 ++real_port, ++slave_port) { 1290 if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port)) 1291 flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port; 1292 else 1293 flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port); 1294 } 1295 for (; slave_port < dev->caps.num_ports; ++slave_port) 1296 flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port); 1297 1298 /* Not exposing RSS IP fragments to guests */ 1299 flags &= ~MLX4_DEV_CAP_FLAG_RSS_IP_FRAG; 1300 MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 1301 1302 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET); 1303 field &= ~0x0F; 1304 field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F; 1305 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET); 1306 1307 /* For guests, disable timestamp */ 1308 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); 1309 field &= 0x7f; 1310 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); 1311 1312 /* For guests, disable vxlan tunneling and QoS support */ 1313 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN); 1314 field &= 0xd7; 1315 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN); 1316 1317 /* For guests, disable port BEACON */ 1318 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_PORT_BEACON_OFFSET); 1319 field &= 0x7f; 1320 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_PORT_BEACON_OFFSET); 1321 1322 /* For guests, report Blueflame disabled */ 1323 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET); 1324 field &= 0x7f; 1325 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); 1326 1327 /* For guests, disable mw type 2 and port remap*/ 1328 MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 1329 bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; 1330 bmme_flags &= ~MLX4_FLAG_PORT_REMAP; 1331 MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 1332 1333 /* turn off device-managed steering capability if not enabled */ 1334 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { 1335 MLX4_GET(field, outbox->buf, 1336 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); 1337 field &= 0x7f; 1338 MLX4_PUT(outbox->buf, field, 1339 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); 1340 } 1341 1342 /* turn off ipoib managed steering for guests */ 1343 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); 1344 field &= ~0x80; 1345 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); 1346 1347 /* turn off host side virt features (VST, FSM, etc) for guests */ 1348 MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); 1349 field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS | 1350 DEV_CAP_EXT_2_FLAG_FSM | DEV_CAP_EXT_2_FLAG_PFC_COUNTERS); 1351 MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); 1352 1353 /* turn off QCN for guests */ 1354 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); 1355 field &= 0xfe; 1356 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); 1357 1358 /* turn off QP max-rate limiting for guests */ 1359 field16 = 0; 1360 MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET); 1361 1362 /* turn off QoS per VF support for guests */ 1363 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); 1364 field &= 0xef; 1365 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); 1366 1367 /* turn off ignore FCS feature for guests */ 1368 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); 1369 field &= 0xfb; 1370 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); 1371 1372 return 0; 1373 } 1374 1375 static void disable_unsupported_roce_caps(void *buf) 1376 { 1377 u32 flags; 1378 1379 MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 1380 flags &= ~(1UL << 31); 1381 MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 1382 MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); 1383 flags &= ~(1UL << 24); 1384 MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); 1385 MLX4_GET(flags, buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 1386 flags &= ~(MLX4_FLAG_ROCE_V1_V2); 1387 MLX4_PUT(buf, flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 1388 } 1389 1390 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, 1391 struct mlx4_vhcr *vhcr, 1392 struct mlx4_cmd_mailbox *inbox, 1393 struct mlx4_cmd_mailbox *outbox, 1394 struct mlx4_cmd_info *cmd) 1395 { 1396 struct mlx4_priv *priv = mlx4_priv(dev); 1397 u64 def_mac; 1398 u8 port_type; 1399 u16 short_field; 1400 int err; 1401 int port = mlx4_slave_convert_port(dev, slave, 1402 vhcr->in_modifier & 0xFF); 1403 1404 #define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 1405 #define MLX4_PORT_LINK_UP_MASK 0x80 1406 #define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c 1407 #define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e 1408 1409 if (port < 0) 1410 return -EINVAL; 1411 1412 /* Protect against untrusted guests: enforce that this is the 1413 * QUERY_PORT general query. 1414 */ 1415 if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF) 1416 return -EINVAL; 1417 1418 vhcr->in_modifier = port; 1419 1420 err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, 1421 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, 1422 MLX4_CMD_NATIVE); 1423 1424 if (!err && dev->caps.function != slave) { 1425 def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac; 1426 MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET); 1427 1428 /* get port type - currently only eth is enabled */ 1429 MLX4_GET(port_type, outbox->buf, 1430 QUERY_PORT_SUPPORTED_TYPE_OFFSET); 1431 1432 /* No link sensing allowed */ 1433 port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK; 1434 /* set port type to currently operating port type */ 1435 port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3); 1436 1437 if (0 /* IFLA_VF_LINK_STATE_ENABLE == admin_link_state */) 1438 port_type |= MLX4_PORT_LINK_UP_MASK; 1439 else if (1 /* IFLA_VF_LINK_STATE_DISABLE == admin_link_state */) 1440 port_type &= ~MLX4_PORT_LINK_UP_MASK; 1441 else if (0 /* IFLA_VF_LINK_STATE_AUTO == admin_link_state && mlx4_is_bonded(dev) */) { 1442 int other_port = (port == 1) ? 2 : 1; 1443 struct mlx4_port_cap port_cap; 1444 1445 err = mlx4_QUERY_PORT(dev, other_port, &port_cap); 1446 if (err) 1447 goto out; 1448 port_type |= (port_cap.link_state << 7); 1449 } 1450 1451 MLX4_PUT(outbox->buf, port_type, 1452 QUERY_PORT_SUPPORTED_TYPE_OFFSET); 1453 1454 if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH) 1455 short_field = mlx4_get_slave_num_gids(dev, slave, port); 1456 else 1457 short_field = 1; /* slave max gids */ 1458 MLX4_PUT(outbox->buf, short_field, 1459 QUERY_PORT_CUR_MAX_GID_OFFSET); 1460 1461 short_field = dev->caps.pkey_table_len[vhcr->in_modifier]; 1462 MLX4_PUT(outbox->buf, short_field, 1463 QUERY_PORT_CUR_MAX_PKEY_OFFSET); 1464 } 1465 out: 1466 return err; 1467 } 1468 1469 int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port, 1470 int *gid_tbl_len, int *pkey_tbl_len) 1471 { 1472 struct mlx4_cmd_mailbox *mailbox; 1473 u32 *outbox; 1474 u16 field; 1475 int err; 1476 1477 mailbox = mlx4_alloc_cmd_mailbox(dev); 1478 if (IS_ERR(mailbox)) 1479 return PTR_ERR(mailbox); 1480 1481 err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, 1482 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, 1483 MLX4_CMD_WRAPPED); 1484 if (err) 1485 goto out; 1486 1487 outbox = mailbox->buf; 1488 1489 MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET); 1490 *gid_tbl_len = field; 1491 1492 MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET); 1493 *pkey_tbl_len = field; 1494 1495 out: 1496 mlx4_free_cmd_mailbox(dev, mailbox); 1497 return err; 1498 } 1499 EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len); 1500 1501 int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) 1502 { 1503 struct mlx4_cmd_mailbox *mailbox; 1504 struct mlx4_icm_iter iter; 1505 __be64 *pages; 1506 int lg; 1507 int nent = 0; 1508 int i; 1509 int err = 0; 1510 int ts = 0, tc = 0; 1511 1512 mailbox = mlx4_alloc_cmd_mailbox(dev); 1513 if (IS_ERR(mailbox)) 1514 return PTR_ERR(mailbox); 1515 pages = mailbox->buf; 1516 1517 for (mlx4_icm_first(icm, &iter); 1518 !mlx4_icm_last(&iter); 1519 mlx4_icm_next(&iter)) { 1520 /* 1521 * We have to pass pages that are aligned to their 1522 * size, so find the least significant 1 in the 1523 * address or size and use that as our log2 size. 1524 */ 1525 lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1; 1526 if (lg < MLX4_ICM_PAGE_SHIFT) { 1527 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n", 1528 MLX4_ICM_PAGE_SIZE, 1529 (unsigned long long) mlx4_icm_addr(&iter), 1530 mlx4_icm_size(&iter)); 1531 err = -EINVAL; 1532 goto out; 1533 } 1534 1535 for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) { 1536 if (virt != -1) { 1537 pages[nent * 2] = cpu_to_be64(virt); 1538 virt += 1 << lg; 1539 } 1540 1541 pages[nent * 2 + 1] = 1542 cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) | 1543 (lg - MLX4_ICM_PAGE_SHIFT)); 1544 ts += 1 << (lg - 10); 1545 ++tc; 1546 1547 if (++nent == MLX4_MAILBOX_SIZE / 16) { 1548 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, 1549 MLX4_CMD_TIME_CLASS_B, 1550 MLX4_CMD_NATIVE); 1551 if (err) 1552 goto out; 1553 nent = 0; 1554 } 1555 } 1556 } 1557 1558 if (nent) 1559 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, 1560 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 1561 if (err) 1562 goto out; 1563 1564 switch (op) { 1565 case MLX4_CMD_MAP_FA: 1566 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts); 1567 break; 1568 case MLX4_CMD_MAP_ICM_AUX: 1569 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts); 1570 break; 1571 case MLX4_CMD_MAP_ICM: 1572 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n", 1573 tc, ts, (unsigned long long) virt - (ts << 10)); 1574 break; 1575 } 1576 1577 out: 1578 mlx4_free_cmd_mailbox(dev, mailbox); 1579 return err; 1580 } 1581 1582 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm) 1583 { 1584 return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1); 1585 } 1586 1587 int mlx4_UNMAP_FA(struct mlx4_dev *dev) 1588 { 1589 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, 1590 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 1591 } 1592 1593 1594 int mlx4_RUN_FW(struct mlx4_dev *dev) 1595 { 1596 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, 1597 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1598 } 1599 1600 int mlx4_QUERY_FW(struct mlx4_dev *dev) 1601 { 1602 struct mlx4_fw *fw = &mlx4_priv(dev)->fw; 1603 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; 1604 struct mlx4_cmd_mailbox *mailbox; 1605 u32 *outbox; 1606 int err = 0; 1607 u64 fw_ver; 1608 u16 cmd_if_rev; 1609 u8 lg; 1610 1611 #define QUERY_FW_OUT_SIZE 0x100 1612 #define QUERY_FW_VER_OFFSET 0x00 1613 #define QUERY_FW_PPF_ID 0x09 1614 #define QUERY_FW_CMD_IF_REV_OFFSET 0x0a 1615 #define QUERY_FW_MAX_CMD_OFFSET 0x0f 1616 #define QUERY_FW_ERR_START_OFFSET 0x30 1617 #define QUERY_FW_ERR_SIZE_OFFSET 0x38 1618 #define QUERY_FW_ERR_BAR_OFFSET 0x3c 1619 1620 #define QUERY_FW_SIZE_OFFSET 0x00 1621 #define QUERY_FW_CLR_INT_BASE_OFFSET 0x20 1622 #define QUERY_FW_CLR_INT_BAR_OFFSET 0x28 1623 1624 #define QUERY_FW_COMM_BASE_OFFSET 0x40 1625 #define QUERY_FW_COMM_BAR_OFFSET 0x48 1626 1627 #define QUERY_FW_CLOCK_OFFSET 0x50 1628 #define QUERY_FW_CLOCK_BAR 0x58 1629 1630 mailbox = mlx4_alloc_cmd_mailbox(dev); 1631 if (IS_ERR(mailbox)) 1632 return PTR_ERR(mailbox); 1633 outbox = mailbox->buf; 1634 1635 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW, 1636 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1637 if (err) 1638 goto out; 1639 1640 MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET); 1641 /* 1642 * FW subminor version is at more significant bits than minor 1643 * version, so swap here. 1644 */ 1645 dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) | 1646 ((fw_ver & 0xffff0000ull) >> 16) | 1647 ((fw_ver & 0x0000ffffull) << 16); 1648 1649 MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); 1650 dev->caps.function = lg; 1651 1652 if (mlx4_is_slave(dev)) 1653 goto out; 1654 1655 1656 MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); 1657 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || 1658 cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { 1659 mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n", 1660 cmd_if_rev); 1661 mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n", 1662 (int) (dev->caps.fw_ver >> 32), 1663 (int) (dev->caps.fw_ver >> 16) & 0xffff, 1664 (int) dev->caps.fw_ver & 0xffff); 1665 mlx4_err(dev, "This driver version supports only revisions %d to %d\n", 1666 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV); 1667 err = -ENODEV; 1668 goto out; 1669 } 1670 1671 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS) 1672 dev->flags |= MLX4_FLAG_OLD_PORT_CMDS; 1673 1674 MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); 1675 cmd->max_cmds = 1 << lg; 1676 1677 mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n", 1678 (int) (dev->caps.fw_ver >> 32), 1679 (int) (dev->caps.fw_ver >> 16) & 0xffff, 1680 (int) dev->caps.fw_ver & 0xffff, 1681 cmd_if_rev, cmd->max_cmds); 1682 1683 MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET); 1684 MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET); 1685 MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET); 1686 fw->catas_bar = (fw->catas_bar >> 6) * 2; 1687 1688 mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n", 1689 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar); 1690 1691 MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET); 1692 MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET); 1693 MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET); 1694 fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2; 1695 1696 MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET); 1697 MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET); 1698 fw->comm_bar = (fw->comm_bar >> 6) * 2; 1699 mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n", 1700 fw->comm_bar, (unsigned long long)fw->comm_base); 1701 mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2); 1702 1703 MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET); 1704 MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR); 1705 fw->clock_bar = (fw->clock_bar >> 6) * 2; 1706 mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n", 1707 fw->clock_bar, (unsigned long long)fw->clock_offset); 1708 1709 /* 1710 * Round up number of system pages needed in case 1711 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. 1712 */ 1713 #if MLX4_ICM_PAGE_SIZE < PAGE_SIZE 1714 fw->fw_pages = 1715 ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> 1716 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); 1717 #endif 1718 1719 mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n", 1720 (unsigned long long) fw->clr_int_base, fw->clr_int_bar); 1721 1722 out: 1723 mlx4_free_cmd_mailbox(dev, mailbox); 1724 return err; 1725 } 1726 1727 int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, 1728 struct mlx4_vhcr *vhcr, 1729 struct mlx4_cmd_mailbox *inbox, 1730 struct mlx4_cmd_mailbox *outbox, 1731 struct mlx4_cmd_info *cmd) 1732 { 1733 u8 *outbuf; 1734 int err; 1735 1736 outbuf = outbox->buf; 1737 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW, 1738 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1739 if (err) 1740 return err; 1741 1742 /* for slaves, set pci PPF ID to invalid and zero out everything 1743 * else except FW version */ 1744 outbuf[0] = outbuf[1] = 0; 1745 memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8); 1746 outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID; 1747 1748 return 0; 1749 } 1750 1751 static void get_board_id(void *vsd, char *board_id) 1752 { 1753 int i; 1754 1755 #define VSD_OFFSET_SIG1 0x00 1756 #define VSD_OFFSET_SIG2 0xde 1757 #define VSD_OFFSET_MLX_BOARD_ID 0xd0 1758 #define VSD_OFFSET_TS_BOARD_ID 0x20 1759 1760 #define VSD_SIGNATURE_TOPSPIN 0x5ad 1761 1762 memset(board_id, 0, MLX4_BOARD_ID_LEN); 1763 1764 if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN && 1765 be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) { 1766 strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN); 1767 } else { 1768 /* 1769 * The board ID is a string but the firmware byte 1770 * swaps each 4-byte word before passing it back to 1771 * us. Therefore we need to swab it before printing. 1772 */ 1773 u32 *bid_u32 = (u32 *)board_id; 1774 1775 for (i = 0; i < 4; ++i) { 1776 typedef struct { u32 value; } __packed u64_p_t; 1777 1778 u32 *addr; 1779 u32 val; 1780 1781 addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4); 1782 val = ((u64_p_t *)addr)->value; 1783 val = swab32(val); 1784 ((u64_p_t *)&bid_u32[i])->value = val; 1785 } 1786 } 1787 } 1788 1789 int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter) 1790 { 1791 struct mlx4_cmd_mailbox *mailbox; 1792 u32 *outbox; 1793 int err; 1794 1795 #define QUERY_ADAPTER_OUT_SIZE 0x100 1796 #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 1797 #define QUERY_ADAPTER_VSD_OFFSET 0x20 1798 1799 mailbox = mlx4_alloc_cmd_mailbox(dev); 1800 if (IS_ERR(mailbox)) 1801 return PTR_ERR(mailbox); 1802 outbox = mailbox->buf; 1803 1804 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER, 1805 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1806 if (err) 1807 goto out; 1808 1809 MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); 1810 1811 get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, 1812 adapter->board_id); 1813 1814 out: 1815 mlx4_free_cmd_mailbox(dev, mailbox); 1816 return err; 1817 } 1818 1819 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) 1820 { 1821 struct mlx4_cmd_mailbox *mailbox; 1822 __be32 *inbox; 1823 int err; 1824 static const u8 a0_dmfs_hw_steering[] = { 1825 [MLX4_STEERING_DMFS_A0_DEFAULT] = 0, 1826 [MLX4_STEERING_DMFS_A0_DYNAMIC] = 1, 1827 [MLX4_STEERING_DMFS_A0_STATIC] = 2, 1828 [MLX4_STEERING_DMFS_A0_DISABLE] = 3 1829 }; 1830 1831 #define INIT_HCA_IN_SIZE 0x200 1832 #define INIT_HCA_VERSION_OFFSET 0x000 1833 #define INIT_HCA_VERSION 2 1834 #define INIT_HCA_VXLAN_OFFSET 0x0c 1835 #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e 1836 #define INIT_HCA_FLAGS_OFFSET 0x014 1837 #define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018 1838 #define INIT_HCA_QPC_OFFSET 0x020 1839 #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) 1840 #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) 1841 #define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28) 1842 #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f) 1843 #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) 1844 #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) 1845 #define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38) 1846 #define INIT_HCA_EQE_CQE_STRIDE_OFFSET (INIT_HCA_QPC_OFFSET + 0x3b) 1847 #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) 1848 #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) 1849 #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) 1850 #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) 1851 #define INIT_HCA_NUM_SYS_EQS_OFFSET (INIT_HCA_QPC_OFFSET + 0x6a) 1852 #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) 1853 #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77) 1854 #define INIT_HCA_MCAST_OFFSET 0x0c0 1855 #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) 1856 #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) 1857 #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) 1858 #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) 1859 #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) 1860 #define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6 1861 #define INIT_HCA_FS_PARAM_OFFSET 0x1d0 1862 #define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00) 1863 #define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12) 1864 #define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18) 1865 #define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b) 1866 #define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21) 1867 #define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22) 1868 #define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25) 1869 #define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26) 1870 #define INIT_HCA_TPT_OFFSET 0x0f0 1871 #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) 1872 #define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08) 1873 #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) 1874 #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10) 1875 #define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18) 1876 #define INIT_HCA_UAR_OFFSET 0x120 1877 #define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a) 1878 #define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b) 1879 1880 mailbox = mlx4_alloc_cmd_mailbox(dev); 1881 if (IS_ERR(mailbox)) 1882 return PTR_ERR(mailbox); 1883 inbox = mailbox->buf; 1884 1885 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; 1886 1887 *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) = 1888 (ilog2(cache_line_size()) - 4) << 5; 1889 1890 #if defined(__LITTLE_ENDIAN) 1891 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); 1892 #elif defined(__BIG_ENDIAN) 1893 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1); 1894 #else 1895 #error Host endianness not defined 1896 #endif 1897 /* Check port for UD address vector: */ 1898 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1); 1899 1900 /* Enable IPoIB checksumming if we can: */ 1901 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) 1902 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3); 1903 1904 /* Enable QoS support if module parameter set */ 1905 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos) 1906 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2); 1907 1908 /* enable counters */ 1909 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS) 1910 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4); 1911 1912 /* Enable RSS spread to fragmented IP packets when supported */ 1913 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG) 1914 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13); 1915 1916 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ 1917 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) { 1918 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29); 1919 dev->caps.eqe_size = 64; 1920 dev->caps.eqe_factor = 1; 1921 } else { 1922 dev->caps.eqe_size = 32; 1923 dev->caps.eqe_factor = 0; 1924 } 1925 1926 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) { 1927 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30); 1928 dev->caps.cqe_size = 64; 1929 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 1930 } else { 1931 dev->caps.cqe_size = 32; 1932 } 1933 1934 #if 0 1935 /* XXX not currently supported by the FreeBSD's mlxen */ 1936 /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ 1937 if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) && 1938 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) { 1939 dev->caps.eqe_size = cache_line_size(); 1940 dev->caps.cqe_size = cache_line_size(); 1941 dev->caps.eqe_factor = 0; 1942 MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 | 1943 (ilog2(dev->caps.eqe_size) - 5)), 1944 INIT_HCA_EQE_CQE_STRIDE_OFFSET); 1945 1946 /* User still need to know to support CQE > 32B */ 1947 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 1948 } 1949 #endif 1950 1951 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) 1952 *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1U << 31); 1953 1954 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 1955 1956 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); 1957 MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET); 1958 MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET); 1959 MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET); 1960 MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET); 1961 MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET); 1962 MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET); 1963 MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET); 1964 MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET); 1965 MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); 1966 MLX4_PUT(inbox, param->num_sys_eqs, INIT_HCA_NUM_SYS_EQS_OFFSET); 1967 MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET); 1968 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET); 1969 1970 /* steering attributes */ 1971 if (dev->caps.steering_mode == 1972 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1973 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= 1974 cpu_to_be32(1 << 1975 INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN); 1976 1977 MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET); 1978 MLX4_PUT(inbox, param->log_mc_entry_sz, 1979 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); 1980 MLX4_PUT(inbox, param->log_mc_table_sz, 1981 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); 1982 /* Enable Ethernet flow steering 1983 * with udp unicast and tcp unicast 1984 */ 1985 if (dev->caps.dmfs_high_steer_mode != 1986 MLX4_STEERING_DMFS_A0_STATIC) 1987 MLX4_PUT(inbox, 1988 (u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), 1989 INIT_HCA_FS_ETH_BITS_OFFSET); 1990 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, 1991 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET); 1992 /* Enable IPoIB flow steering 1993 * with udp unicast and tcp unicast 1994 */ 1995 MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), 1996 INIT_HCA_FS_IB_BITS_OFFSET); 1997 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, 1998 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET); 1999 2000 if (dev->caps.dmfs_high_steer_mode != 2001 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2002 MLX4_PUT(inbox, 2003 ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode] 2004 << 6)), 2005 INIT_HCA_FS_A0_OFFSET); 2006 } else { 2007 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); 2008 MLX4_PUT(inbox, param->log_mc_entry_sz, 2009 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 2010 MLX4_PUT(inbox, param->log_mc_hash_sz, 2011 INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 2012 MLX4_PUT(inbox, param->log_mc_table_sz, 2013 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 2014 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0) 2015 MLX4_PUT(inbox, (u8) (1 << 3), 2016 INIT_HCA_UC_STEERING_OFFSET); 2017 } 2018 2019 /* TPT attributes */ 2020 2021 MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET); 2022 MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET); 2023 MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET); 2024 MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET); 2025 MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET); 2026 2027 /* UAR attributes */ 2028 2029 MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET); 2030 MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); 2031 2032 /* set parser VXLAN attributes */ 2033 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) { 2034 u8 parser_params = 0; 2035 MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET); 2036 } 2037 2038 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 2039 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 2040 2041 if (err) 2042 mlx4_err(dev, "INIT_HCA returns %d\n", err); 2043 2044 mlx4_free_cmd_mailbox(dev, mailbox); 2045 return err; 2046 } 2047 2048 int mlx4_QUERY_HCA(struct mlx4_dev *dev, 2049 struct mlx4_init_hca_param *param) 2050 { 2051 struct mlx4_cmd_mailbox *mailbox; 2052 __be32 *outbox; 2053 u32 dword_field; 2054 int err; 2055 u8 byte_field; 2056 static const u8 a0_dmfs_query_hw_steering[] = { 2057 [0] = MLX4_STEERING_DMFS_A0_DEFAULT, 2058 [1] = MLX4_STEERING_DMFS_A0_DYNAMIC, 2059 [2] = MLX4_STEERING_DMFS_A0_STATIC, 2060 [3] = MLX4_STEERING_DMFS_A0_DISABLE 2061 }; 2062 2063 #define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04 2064 #define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c 2065 2066 mailbox = mlx4_alloc_cmd_mailbox(dev); 2067 if (IS_ERR(mailbox)) 2068 return PTR_ERR(mailbox); 2069 outbox = mailbox->buf; 2070 2071 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, 2072 MLX4_CMD_QUERY_HCA, 2073 MLX4_CMD_TIME_CLASS_B, 2074 !mlx4_is_slave(dev)); 2075 if (err) 2076 goto out; 2077 2078 MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET); 2079 MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET); 2080 2081 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 2082 2083 MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET); 2084 MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET); 2085 MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET); 2086 MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET); 2087 MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET); 2088 MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET); 2089 MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET); 2090 MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); 2091 MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); 2092 MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); 2093 MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); 2094 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); 2095 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); 2096 2097 MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET); 2098 if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) { 2099 param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 2100 } else { 2101 MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET); 2102 if (byte_field & 0x8) 2103 param->steering_mode = MLX4_STEERING_MODE_B0; 2104 else 2105 param->steering_mode = MLX4_STEERING_MODE_A0; 2106 } 2107 2108 if (dword_field & (1 << 13)) 2109 param->rss_ip_frags = 1; 2110 2111 /* steering attributes */ 2112 if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 2113 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); 2114 MLX4_GET(param->log_mc_entry_sz, outbox, 2115 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); 2116 MLX4_GET(param->log_mc_table_sz, outbox, 2117 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); 2118 MLX4_GET(byte_field, outbox, 2119 INIT_HCA_FS_A0_OFFSET); 2120 param->dmfs_high_steer_mode = 2121 a0_dmfs_query_hw_steering[(byte_field >> 6) & 3]; 2122 } else { 2123 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); 2124 MLX4_GET(param->log_mc_entry_sz, outbox, 2125 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 2126 MLX4_GET(param->log_mc_hash_sz, outbox, 2127 INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 2128 MLX4_GET(param->log_mc_table_sz, outbox, 2129 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 2130 } 2131 2132 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ 2133 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS); 2134 if (byte_field & 0x20) /* 64-bytes eqe enabled */ 2135 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED; 2136 if (byte_field & 0x40) /* 64-bytes cqe enabled */ 2137 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED; 2138 2139 /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ 2140 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET); 2141 if (byte_field) { 2142 param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED; 2143 param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED; 2144 param->cqe_size = 1 << ((byte_field & 2145 MLX4_CQE_SIZE_MASK_STRIDE) + 5); 2146 param->eqe_size = 1 << (((byte_field & 2147 MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5); 2148 } 2149 2150 /* TPT attributes */ 2151 2152 MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); 2153 MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET); 2154 MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); 2155 MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); 2156 MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); 2157 2158 /* UAR attributes */ 2159 2160 MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); 2161 MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); 2162 2163 /* phv_check enable */ 2164 MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET); 2165 if (byte_field & 0x2) 2166 param->phv_check_en = 1; 2167 out: 2168 mlx4_free_cmd_mailbox(dev, mailbox); 2169 2170 return err; 2171 } 2172 2173 static int mlx4_hca_core_clock_update(struct mlx4_dev *dev) 2174 { 2175 struct mlx4_cmd_mailbox *mailbox; 2176 __be32 *outbox; 2177 int err; 2178 2179 mailbox = mlx4_alloc_cmd_mailbox(dev); 2180 if (IS_ERR(mailbox)) { 2181 mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n"); 2182 return PTR_ERR(mailbox); 2183 } 2184 outbox = mailbox->buf; 2185 2186 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, 2187 MLX4_CMD_QUERY_HCA, 2188 MLX4_CMD_TIME_CLASS_B, 2189 !mlx4_is_slave(dev)); 2190 if (err) { 2191 mlx4_warn(dev, "hca_core_clock update failed\n"); 2192 goto out; 2193 } 2194 2195 MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET); 2196 2197 out: 2198 mlx4_free_cmd_mailbox(dev, mailbox); 2199 2200 return err; 2201 } 2202 2203 /* for IB-type ports only in SRIOV mode. Checks that both proxy QP0 2204 * and real QP0 are active, so that the paravirtualized QP0 is ready 2205 * to operate */ 2206 static int check_qp0_state(struct mlx4_dev *dev, int function, int port) 2207 { 2208 struct mlx4_priv *priv = mlx4_priv(dev); 2209 /* irrelevant if not infiniband */ 2210 if (priv->mfunc.master.qp0_state[port].proxy_qp0_active && 2211 priv->mfunc.master.qp0_state[port].qp0_active) 2212 return 1; 2213 return 0; 2214 } 2215 2216 int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, 2217 struct mlx4_vhcr *vhcr, 2218 struct mlx4_cmd_mailbox *inbox, 2219 struct mlx4_cmd_mailbox *outbox, 2220 struct mlx4_cmd_info *cmd) 2221 { 2222 struct mlx4_priv *priv = mlx4_priv(dev); 2223 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); 2224 int err; 2225 2226 if (port < 0) 2227 return -EINVAL; 2228 2229 if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port)) 2230 return 0; 2231 2232 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { 2233 /* Enable port only if it was previously disabled */ 2234 if (!priv->mfunc.master.init_port_ref[port]) { 2235 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2236 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2237 if (err) 2238 return err; 2239 } 2240 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); 2241 } else { 2242 if (slave == mlx4_master_func_num(dev)) { 2243 if (check_qp0_state(dev, slave, port) && 2244 !priv->mfunc.master.qp0_state[port].port_active) { 2245 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2246 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2247 if (err) 2248 return err; 2249 priv->mfunc.master.qp0_state[port].port_active = 1; 2250 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); 2251 } 2252 } else 2253 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); 2254 } 2255 ++priv->mfunc.master.init_port_ref[port]; 2256 return 0; 2257 } 2258 2259 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) 2260 { 2261 struct mlx4_cmd_mailbox *mailbox; 2262 u32 *inbox; 2263 int err; 2264 u32 flags; 2265 u16 field; 2266 2267 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 2268 #define INIT_PORT_IN_SIZE 256 2269 #define INIT_PORT_FLAGS_OFFSET 0x00 2270 #define INIT_PORT_FLAG_SIG (1 << 18) 2271 #define INIT_PORT_FLAG_NG (1 << 17) 2272 #define INIT_PORT_FLAG_G0 (1 << 16) 2273 #define INIT_PORT_VL_SHIFT 4 2274 #define INIT_PORT_PORT_WIDTH_SHIFT 8 2275 #define INIT_PORT_MTU_OFFSET 0x04 2276 #define INIT_PORT_MAX_GID_OFFSET 0x06 2277 #define INIT_PORT_MAX_PKEY_OFFSET 0x0a 2278 #define INIT_PORT_GUID0_OFFSET 0x10 2279 #define INIT_PORT_NODE_GUID_OFFSET 0x18 2280 #define INIT_PORT_SI_GUID_OFFSET 0x20 2281 2282 mailbox = mlx4_alloc_cmd_mailbox(dev); 2283 if (IS_ERR(mailbox)) 2284 return PTR_ERR(mailbox); 2285 inbox = mailbox->buf; 2286 2287 flags = 0; 2288 flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT; 2289 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; 2290 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); 2291 2292 field = 128 << dev->caps.ib_mtu_cap[port]; 2293 MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); 2294 field = dev->caps.gid_table_len[port]; 2295 MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); 2296 field = dev->caps.pkey_table_len[port]; 2297 MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET); 2298 2299 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT, 2300 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2301 2302 mlx4_free_cmd_mailbox(dev, mailbox); 2303 } else 2304 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2305 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2306 2307 if (!err) 2308 mlx4_hca_core_clock_update(dev); 2309 2310 return err; 2311 } 2312 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT); 2313 2314 int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, 2315 struct mlx4_vhcr *vhcr, 2316 struct mlx4_cmd_mailbox *inbox, 2317 struct mlx4_cmd_mailbox *outbox, 2318 struct mlx4_cmd_info *cmd) 2319 { 2320 struct mlx4_priv *priv = mlx4_priv(dev); 2321 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); 2322 int err; 2323 2324 if (port < 0) 2325 return -EINVAL; 2326 2327 if (!(priv->mfunc.master.slave_state[slave].init_port_mask & 2328 (1 << port))) 2329 return 0; 2330 2331 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { 2332 if (priv->mfunc.master.init_port_ref[port] == 1) { 2333 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2334 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2335 if (err) 2336 return err; 2337 } 2338 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); 2339 } else { 2340 /* infiniband port */ 2341 if (slave == mlx4_master_func_num(dev)) { 2342 if (!priv->mfunc.master.qp0_state[port].qp0_active && 2343 priv->mfunc.master.qp0_state[port].port_active) { 2344 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2345 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2346 if (err) 2347 return err; 2348 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); 2349 priv->mfunc.master.qp0_state[port].port_active = 0; 2350 } 2351 } else 2352 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); 2353 } 2354 --priv->mfunc.master.init_port_ref[port]; 2355 return 0; 2356 } 2357 2358 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) 2359 { 2360 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2361 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2362 } 2363 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); 2364 2365 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) 2366 { 2367 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 2368 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 2369 } 2370 2371 struct mlx4_config_dev { 2372 __be32 update_flags; 2373 __be32 rsvd1[3]; 2374 __be16 vxlan_udp_dport; 2375 __be16 rsvd2; 2376 __be16 roce_v2_entropy; 2377 __be16 roce_v2_udp_dport; 2378 __be32 roce_flags; 2379 __be32 rsvd4[25]; 2380 __be16 rsvd5; 2381 u8 rsvd6; 2382 u8 rx_checksum_val; 2383 }; 2384 2385 #define MLX4_VXLAN_UDP_DPORT (1 << 0) 2386 #define MLX4_ROCE_V2_UDP_DPORT BIT(3) 2387 #define MLX4_DISABLE_RX_PORT BIT(18) 2388 2389 static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) 2390 { 2391 int err; 2392 struct mlx4_cmd_mailbox *mailbox; 2393 2394 mailbox = mlx4_alloc_cmd_mailbox(dev); 2395 if (IS_ERR(mailbox)) 2396 return PTR_ERR(mailbox); 2397 2398 memcpy(mailbox->buf, config_dev, sizeof(*config_dev)); 2399 2400 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV, 2401 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2402 2403 mlx4_free_cmd_mailbox(dev, mailbox); 2404 return err; 2405 } 2406 2407 static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) 2408 { 2409 int err; 2410 struct mlx4_cmd_mailbox *mailbox; 2411 2412 mailbox = mlx4_alloc_cmd_mailbox(dev); 2413 if (IS_ERR(mailbox)) 2414 return PTR_ERR(mailbox); 2415 2416 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV, 2417 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2418 2419 if (!err) 2420 memcpy(config_dev, mailbox->buf, sizeof(*config_dev)); 2421 2422 mlx4_free_cmd_mailbox(dev, mailbox); 2423 return err; 2424 } 2425 2426 /* Conversion between the HW values and the actual functionality. 2427 * The value represented by the array index, 2428 * and the functionality determined by the flags. 2429 */ 2430 static const u8 config_dev_csum_flags[] = { 2431 [0] = 0, 2432 [1] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP, 2433 [2] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP | 2434 MLX4_RX_CSUM_MODE_L4, 2435 [3] = MLX4_RX_CSUM_MODE_L4 | 2436 MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP | 2437 MLX4_RX_CSUM_MODE_MULTI_VLAN 2438 }; 2439 2440 int mlx4_config_dev_retrieval(struct mlx4_dev *dev, 2441 struct mlx4_config_dev_params *params) 2442 { 2443 struct mlx4_config_dev config_dev = {0}; 2444 int err; 2445 u8 csum_mask; 2446 2447 #define CONFIG_DEV_RX_CSUM_MODE_MASK 0x7 2448 #define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET 0 2449 #define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET 4 2450 2451 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV)) 2452 return -ENOTSUPP; 2453 2454 err = mlx4_CONFIG_DEV_get(dev, &config_dev); 2455 if (err) 2456 return err; 2457 2458 csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) & 2459 CONFIG_DEV_RX_CSUM_MODE_MASK; 2460 2461 if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0])) 2462 return -EINVAL; 2463 params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask]; 2464 2465 csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) & 2466 CONFIG_DEV_RX_CSUM_MODE_MASK; 2467 2468 if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0])) 2469 return -EINVAL; 2470 params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask]; 2471 2472 params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport); 2473 2474 return 0; 2475 } 2476 EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval); 2477 2478 int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port) 2479 { 2480 struct mlx4_config_dev config_dev; 2481 2482 memset(&config_dev, 0, sizeof(config_dev)); 2483 config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT); 2484 config_dev.vxlan_udp_dport = udp_port; 2485 2486 return mlx4_CONFIG_DEV_set(dev, &config_dev); 2487 } 2488 EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port); 2489 2490 #define CONFIG_DISABLE_RX_PORT BIT(15) 2491 int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis) 2492 { 2493 struct mlx4_config_dev config_dev; 2494 2495 memset(&config_dev, 0, sizeof(config_dev)); 2496 config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT); 2497 if (dis) 2498 config_dev.roce_flags = 2499 cpu_to_be32(CONFIG_DISABLE_RX_PORT); 2500 2501 return mlx4_CONFIG_DEV_set(dev, &config_dev); 2502 } 2503 2504 int mlx4_config_roce_v2_port(struct mlx4_dev *dev, u16 udp_port) 2505 { 2506 struct mlx4_config_dev config_dev; 2507 2508 memset(&config_dev, 0, sizeof(config_dev)); 2509 config_dev.update_flags = cpu_to_be32(MLX4_ROCE_V2_UDP_DPORT); 2510 config_dev.roce_v2_udp_dport = cpu_to_be16(udp_port); 2511 2512 return mlx4_CONFIG_DEV_set(dev, &config_dev); 2513 } 2514 EXPORT_SYMBOL_GPL(mlx4_config_roce_v2_port); 2515 2516 int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2) 2517 { 2518 struct mlx4_cmd_mailbox *mailbox; 2519 struct { 2520 __be32 v_port1; 2521 __be32 v_port2; 2522 } *v2p; 2523 int err; 2524 2525 mailbox = mlx4_alloc_cmd_mailbox(dev); 2526 if (IS_ERR(mailbox)) 2527 return -ENOMEM; 2528 2529 v2p = mailbox->buf; 2530 v2p->v_port1 = cpu_to_be32(port1); 2531 v2p->v_port2 = cpu_to_be32(port2); 2532 2533 err = mlx4_cmd(dev, mailbox->dma, 0, 2534 MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP, 2535 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2536 2537 mlx4_free_cmd_mailbox(dev, mailbox); 2538 return err; 2539 } 2540 2541 2542 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) 2543 { 2544 int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0, 2545 MLX4_CMD_SET_ICM_SIZE, 2546 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2547 if (ret) 2548 return ret; 2549 2550 /* 2551 * Round up number of system pages needed in case 2552 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. 2553 */ 2554 #if MLX4_ICM_PAGE_SIZE < PAGE_SIZE 2555 *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> 2556 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); 2557 #endif 2558 2559 return 0; 2560 } 2561 2562 int mlx4_NOP(struct mlx4_dev *dev) 2563 { 2564 /* Input modifier of 0x1f means "finish as soon as possible." */ 2565 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A, 2566 MLX4_CMD_NATIVE); 2567 } 2568 2569 int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier, 2570 const u32 offset[], 2571 u32 value[], size_t array_len, u8 port) 2572 { 2573 struct mlx4_cmd_mailbox *mailbox; 2574 u32 *outbox; 2575 size_t i; 2576 int ret; 2577 2578 mailbox = mlx4_alloc_cmd_mailbox(dev); 2579 if (IS_ERR(mailbox)) 2580 return PTR_ERR(mailbox); 2581 2582 outbox = mailbox->buf; 2583 2584 ret = mlx4_cmd_box(dev, 0, mailbox->dma, port, op_modifier, 2585 MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A, 2586 MLX4_CMD_NATIVE); 2587 if (ret) 2588 goto out; 2589 2590 for (i = 0; i < array_len; i++) { 2591 if (offset[i] > MLX4_MAILBOX_SIZE) { 2592 ret = -EINVAL; 2593 goto out; 2594 } 2595 2596 MLX4_GET(value[i], outbox, offset[i]); 2597 } 2598 2599 out: 2600 mlx4_free_cmd_mailbox(dev, mailbox); 2601 return ret; 2602 } 2603 EXPORT_SYMBOL(mlx4_query_diag_counters); 2604 2605 int mlx4_get_phys_port_id(struct mlx4_dev *dev) 2606 { 2607 u8 port; 2608 u32 *outbox; 2609 struct mlx4_cmd_mailbox *mailbox; 2610 u32 in_mod; 2611 u32 guid_hi, guid_lo; 2612 int err, ret = 0; 2613 #define MOD_STAT_CFG_PORT_OFFSET 8 2614 #define MOD_STAT_CFG_GUID_H 0X14 2615 #define MOD_STAT_CFG_GUID_L 0X1c 2616 2617 mailbox = mlx4_alloc_cmd_mailbox(dev); 2618 if (IS_ERR(mailbox)) 2619 return PTR_ERR(mailbox); 2620 outbox = mailbox->buf; 2621 2622 for (port = 1; port <= dev->caps.num_ports; port++) { 2623 in_mod = port << MOD_STAT_CFG_PORT_OFFSET; 2624 err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2, 2625 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, 2626 MLX4_CMD_NATIVE); 2627 if (err) { 2628 mlx4_err(dev, "Fail to get port %d uplink guid\n", 2629 port); 2630 ret = err; 2631 } else { 2632 MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H); 2633 MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L); 2634 dev->caps.phys_port_id[port] = (u64)guid_lo | 2635 (u64)guid_hi << 32; 2636 } 2637 } 2638 mlx4_free_cmd_mailbox(dev, mailbox); 2639 return ret; 2640 } 2641 2642 #define MLX4_WOL_SETUP_MODE (5 << 28) 2643 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port) 2644 { 2645 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; 2646 2647 return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3, 2648 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, 2649 MLX4_CMD_NATIVE); 2650 } 2651 EXPORT_SYMBOL_GPL(mlx4_wol_read); 2652 2653 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) 2654 { 2655 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; 2656 2657 return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG, 2658 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2659 } 2660 EXPORT_SYMBOL_GPL(mlx4_wol_write); 2661 2662 enum { 2663 ADD_TO_MCG = 0x26, 2664 }; 2665 2666 2667 void mlx4_opreq_action(struct work_struct *work) 2668 { 2669 struct mlx4_priv *priv = container_of(work, struct mlx4_priv, 2670 opreq_task); 2671 struct mlx4_dev *dev = &priv->dev; 2672 int num_tasks = atomic_read(&priv->opreq_count); 2673 struct mlx4_cmd_mailbox *mailbox; 2674 struct mlx4_mgm *mgm; 2675 u32 *outbox; 2676 u32 modifier; 2677 u16 token; 2678 u16 type; 2679 int err; 2680 u32 num_qps; 2681 struct mlx4_qp qp; 2682 int i; 2683 u8 rem_mcg; 2684 u8 prot; 2685 2686 #define GET_OP_REQ_MODIFIER_OFFSET 0x08 2687 #define GET_OP_REQ_TOKEN_OFFSET 0x14 2688 #define GET_OP_REQ_TYPE_OFFSET 0x1a 2689 #define GET_OP_REQ_DATA_OFFSET 0x20 2690 2691 mailbox = mlx4_alloc_cmd_mailbox(dev); 2692 if (IS_ERR(mailbox)) { 2693 mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n"); 2694 return; 2695 } 2696 outbox = mailbox->buf; 2697 2698 while (num_tasks) { 2699 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, 2700 MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, 2701 MLX4_CMD_NATIVE); 2702 if (err) { 2703 mlx4_err(dev, "Failed to retrieve required operation: %d\n", 2704 err); 2705 goto out; 2706 } 2707 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); 2708 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); 2709 MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET); 2710 type &= 0xfff; 2711 2712 switch (type) { 2713 case ADD_TO_MCG: 2714 if (dev->caps.steering_mode == 2715 MLX4_STEERING_MODE_DEVICE_MANAGED) { 2716 mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n"); 2717 err = EPERM; 2718 break; 2719 } 2720 mgm = (struct mlx4_mgm *)((u8 *)(outbox) + 2721 GET_OP_REQ_DATA_OFFSET); 2722 num_qps = be32_to_cpu(mgm->members_count) & 2723 MGM_QPN_MASK; 2724 rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1; 2725 prot = ((u8 *)(&mgm->members_count))[0] >> 6; 2726 2727 for (i = 0; i < num_qps; i++) { 2728 qp.qpn = be32_to_cpu(mgm->qp[i]); 2729 if (rem_mcg) 2730 err = mlx4_multicast_detach(dev, &qp, 2731 mgm->gid, 2732 prot, 0); 2733 else 2734 err = mlx4_multicast_attach(dev, &qp, 2735 mgm->gid, 2736 mgm->gid[5] 2737 , 0, prot, 2738 NULL); 2739 if (err) 2740 break; 2741 } 2742 break; 2743 default: 2744 mlx4_warn(dev, "Bad type for required operation\n"); 2745 err = EINVAL; 2746 break; 2747 } 2748 err = mlx4_cmd(dev, 0, ((u32) err | 2749 (__force u32)cpu_to_be32(token) << 16), 2750 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, 2751 MLX4_CMD_NATIVE); 2752 if (err) { 2753 mlx4_err(dev, "Failed to acknowledge required request: %d\n", 2754 err); 2755 goto out; 2756 } 2757 memset(outbox, 0, 0xffc); 2758 num_tasks = atomic_dec_return(&priv->opreq_count); 2759 } 2760 2761 out: 2762 mlx4_free_cmd_mailbox(dev, mailbox); 2763 } 2764 2765 static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev, 2766 struct mlx4_cmd_mailbox *mailbox) 2767 { 2768 #define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET 0x10 2769 #define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET 0x20 2770 #define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET 0x40 2771 #define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET 0x70 2772 2773 u32 set_attr_mask, getresp_attr_mask; 2774 u32 trap_attr_mask, traprepress_attr_mask; 2775 2776 MLX4_GET(set_attr_mask, mailbox->buf, 2777 MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET); 2778 mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n", 2779 set_attr_mask); 2780 2781 MLX4_GET(getresp_attr_mask, mailbox->buf, 2782 MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET); 2783 mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n", 2784 getresp_attr_mask); 2785 2786 MLX4_GET(trap_attr_mask, mailbox->buf, 2787 MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET); 2788 mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n", 2789 trap_attr_mask); 2790 2791 MLX4_GET(traprepress_attr_mask, mailbox->buf, 2792 MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET); 2793 mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n", 2794 traprepress_attr_mask); 2795 2796 if (set_attr_mask && getresp_attr_mask && trap_attr_mask && 2797 traprepress_attr_mask) 2798 return 1; 2799 2800 return 0; 2801 } 2802 2803 int mlx4_config_mad_demux(struct mlx4_dev *dev) 2804 { 2805 struct mlx4_cmd_mailbox *mailbox; 2806 int err; 2807 2808 /* Check if mad_demux is supported */ 2809 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX)) 2810 return 0; 2811 2812 mailbox = mlx4_alloc_cmd_mailbox(dev); 2813 if (IS_ERR(mailbox)) { 2814 mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX"); 2815 return -ENOMEM; 2816 } 2817 2818 /* Query mad_demux to find out which MADs are handled by internal sma */ 2819 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */, 2820 MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX, 2821 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2822 if (err) { 2823 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n", 2824 err); 2825 goto out; 2826 } 2827 2828 if (mlx4_check_smp_firewall_active(dev, mailbox)) 2829 dev->flags |= MLX4_FLAG_SECURE_HOST; 2830 2831 /* Config mad_demux to handle all MADs returned by the query above */ 2832 err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */, 2833 MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX, 2834 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 2835 if (err) { 2836 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err); 2837 goto out; 2838 } 2839 2840 if (dev->flags & MLX4_FLAG_SECURE_HOST) 2841 mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n"); 2842 out: 2843 mlx4_free_cmd_mailbox(dev, mailbox); 2844 return err; 2845 } 2846 2847 /* Access Reg commands */ 2848 enum mlx4_access_reg_masks { 2849 MLX4_ACCESS_REG_STATUS_MASK = 0x7f, 2850 MLX4_ACCESS_REG_METHOD_MASK = 0x7f, 2851 MLX4_ACCESS_REG_LEN_MASK = 0x7ff 2852 }; 2853 2854 struct mlx4_access_reg { 2855 __be16 constant1; 2856 u8 status; 2857 u8 resrvd1; 2858 __be16 reg_id; 2859 u8 method; 2860 u8 constant2; 2861 __be32 resrvd2[2]; 2862 __be16 len_const; 2863 __be16 resrvd3; 2864 #define MLX4_ACCESS_REG_HEADER_SIZE (20) 2865 u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE]; 2866 } __attribute__((__packed__)); 2867 2868 /** 2869 * mlx4_ACCESS_REG - Generic access reg command. 2870 * @dev: mlx4_dev. 2871 * @reg_id: register ID to access. 2872 * @method: Access method Read/Write. 2873 * @reg_len: register length to Read/Write in bytes. 2874 * @reg_data: reg_data pointer to Read/Write From/To. 2875 * 2876 * Access ConnectX registers FW command. 2877 * Returns 0 on success and copies outbox mlx4_access_reg data 2878 * field into reg_data or a negative error code. 2879 */ 2880 static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id, 2881 enum mlx4_access_reg_method method, 2882 u16 reg_len, void *reg_data) 2883 { 2884 struct mlx4_cmd_mailbox *inbox, *outbox; 2885 struct mlx4_access_reg *inbuf, *outbuf; 2886 int err; 2887 2888 inbox = mlx4_alloc_cmd_mailbox(dev); 2889 if (IS_ERR(inbox)) 2890 return PTR_ERR(inbox); 2891 2892 outbox = mlx4_alloc_cmd_mailbox(dev); 2893 if (IS_ERR(outbox)) { 2894 mlx4_free_cmd_mailbox(dev, inbox); 2895 return PTR_ERR(outbox); 2896 } 2897 2898 inbuf = inbox->buf; 2899 outbuf = outbox->buf; 2900 2901 inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4); 2902 inbuf->constant2 = 0x1; 2903 inbuf->reg_id = cpu_to_be16(reg_id); 2904 inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK; 2905 2906 reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data))); 2907 inbuf->len_const = 2908 cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) | 2909 ((0x3) << 12)); 2910 2911 memcpy(inbuf->reg_data, reg_data, reg_len); 2912 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0, 2913 MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, 2914 MLX4_CMD_WRAPPED); 2915 if (err) 2916 goto out; 2917 2918 if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) { 2919 err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK; 2920 mlx4_err(dev, 2921 "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n", 2922 reg_id, err); 2923 goto out; 2924 } 2925 2926 memcpy(reg_data, outbuf->reg_data, reg_len); 2927 out: 2928 mlx4_free_cmd_mailbox(dev, inbox); 2929 mlx4_free_cmd_mailbox(dev, outbox); 2930 return err; 2931 } 2932 2933 /* ConnectX registers IDs */ 2934 enum mlx4_reg_id { 2935 MLX4_REG_ID_PTYS = 0x5004, 2936 }; 2937 2938 /** 2939 * mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed) 2940 * register 2941 * @dev: mlx4_dev. 2942 * @method: Access method Read/Write. 2943 * @ptys_reg: PTYS register data pointer. 2944 * 2945 * Access ConnectX PTYS register, to Read/Write Port Type/Speed 2946 * configuration 2947 * Returns 0 on success or a negative error code. 2948 */ 2949 int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, 2950 enum mlx4_access_reg_method method, 2951 struct mlx4_ptys_reg *ptys_reg) 2952 { 2953 return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS, 2954 method, sizeof(*ptys_reg), ptys_reg); 2955 } 2956 EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG); 2957 2958 int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave, 2959 struct mlx4_vhcr *vhcr, 2960 struct mlx4_cmd_mailbox *inbox, 2961 struct mlx4_cmd_mailbox *outbox, 2962 struct mlx4_cmd_info *cmd) 2963 { 2964 struct mlx4_access_reg *inbuf = inbox->buf; 2965 u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK; 2966 u16 reg_id = be16_to_cpu(inbuf->reg_id); 2967 2968 if (slave != mlx4_master_func_num(dev) && 2969 method == MLX4_ACCESS_REG_WRITE) 2970 return -EPERM; 2971 2972 if (reg_id == MLX4_REG_ID_PTYS) { 2973 struct mlx4_ptys_reg *ptys_reg = 2974 (struct mlx4_ptys_reg *)inbuf->reg_data; 2975 2976 ptys_reg->local_port = 2977 mlx4_slave_convert_port(dev, slave, 2978 ptys_reg->local_port); 2979 } 2980 2981 return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier, 2982 0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, 2983 MLX4_CMD_NATIVE); 2984 } 2985 2986 static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit) 2987 { 2988 #define SET_PORT_GEN_PHV_VALID 0x10 2989 #define SET_PORT_GEN_PHV_EN 0x80 2990 2991 struct mlx4_cmd_mailbox *mailbox; 2992 struct mlx4_set_port_general_context *context; 2993 u32 in_mod; 2994 int err; 2995 2996 mailbox = mlx4_alloc_cmd_mailbox(dev); 2997 if (IS_ERR(mailbox)) 2998 return PTR_ERR(mailbox); 2999 context = mailbox->buf; 3000 3001 context->v_ignore_fcs |= SET_PORT_GEN_PHV_VALID; 3002 if (phv_bit) 3003 context->phv_en |= SET_PORT_GEN_PHV_EN; 3004 3005 in_mod = MLX4_SET_PORT_GENERAL << 8 | port; 3006 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, 3007 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 3008 MLX4_CMD_NATIVE); 3009 3010 mlx4_free_cmd_mailbox(dev, mailbox); 3011 return err; 3012 } 3013 3014 int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv) 3015 { 3016 int err; 3017 struct mlx4_func_cap func_cap; 3018 3019 memset(&func_cap, 0, sizeof(func_cap)); 3020 err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap); 3021 if (!err) 3022 *phv = func_cap.flags0 & QUERY_FUNC_CAP_PHV_BIT; 3023 return err; 3024 } 3025 EXPORT_SYMBOL(get_phv_bit); 3026 3027 int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val) 3028 { 3029 int ret; 3030 3031 if (mlx4_is_slave(dev)) 3032 return -EPERM; 3033 3034 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN && 3035 !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) { 3036 ret = mlx4_SET_PORT_phv_bit(dev, port, new_val); 3037 if (!ret) 3038 dev->caps.phv_bit[port] = new_val; 3039 return ret; 3040 } 3041 3042 return -EOPNOTSUPP; 3043 } 3044 EXPORT_SYMBOL(set_phv_bit); 3045 3046 void mlx4_replace_zero_macs(struct mlx4_dev *dev) 3047 { 3048 int i; 3049 u8 mac_addr[ETH_ALEN]; 3050 3051 dev->port_random_macs = 0; 3052 for (i = 1; i <= dev->caps.num_ports; ++i) 3053 if (!dev->caps.def_mac[i] && 3054 dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) { 3055 random_ether_addr(mac_addr); 3056 dev->port_random_macs |= 1 << i; 3057 dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr); 3058 } 3059 } 3060 EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs); 3061