1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0 3 * 4 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 5 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 6 * Copyright (c) 2004 Intel Corporation. All rights reserved. 7 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 8 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 9 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 10 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 11 * 12 * This software is available to you under a choice of one of two 13 * licenses. You may choose to be licensed under the terms of the GNU 14 * General Public License (GPL) Version 2, available from the file 15 * COPYING in the main directory of this source tree, or the 16 * OpenIB.org BSD license below: 17 * 18 * Redistribution and use in source and binary forms, with or 19 * without modification, are permitted provided that the following 20 * conditions are met: 21 * 22 * - Redistributions of source code must retain the above 23 * copyright notice, this list of conditions and the following 24 * disclaimer. 25 * 26 * - Redistributions in binary form must reproduce the above 27 * copyright notice, this list of conditions and the following 28 * disclaimer in the documentation and/or other materials 29 * provided with the distribution. 30 * 31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 35 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 36 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 37 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 38 * SOFTWARE. 39 * 40 * $FreeBSD$ 41 */ 42 43 #if !defined(IB_VERBS_H) 44 #define IB_VERBS_H 45 46 #include <linux/types.h> 47 #include <linux/device.h> 48 #include <linux/mm.h> 49 #include <linux/dma-mapping.h> 50 #include <linux/kref.h> 51 #include <linux/list.h> 52 #include <linux/rwsem.h> 53 #include <linux/scatterlist.h> 54 #include <linux/workqueue.h> 55 #include <linux/socket.h> 56 #include <linux/if_ether.h> 57 #include <net/ipv6.h> 58 #include <net/ip.h> 59 #include <linux/string.h> 60 #include <linux/slab.h> 61 #include <linux/rcupdate.h> 62 #include <linux/netdevice.h> 63 #include <netinet/ip.h> 64 65 #include <asm/atomic.h> 66 #include <asm/uaccess.h> 67 68 struct ifla_vf_info; 69 struct ifla_vf_stats; 70 struct ib_uverbs_file; 71 72 extern struct workqueue_struct *ib_wq; 73 extern struct workqueue_struct *ib_comp_wq; 74 75 union ib_gid { 76 u8 raw[16]; 77 struct { 78 __be64 subnet_prefix; 79 __be64 interface_id; 80 } global; 81 }; 82 83 extern union ib_gid zgid; 84 85 enum ib_gid_type { 86 /* If link layer is Ethernet, this is RoCE V1 */ 87 IB_GID_TYPE_IB = 0, 88 IB_GID_TYPE_ROCE = 0, 89 IB_GID_TYPE_ROCE_UDP_ENCAP = 1, 90 IB_GID_TYPE_SIZE 91 }; 92 93 #define ROCE_V2_UDP_DPORT 4791 94 struct ib_gid_attr { 95 enum ib_gid_type gid_type; 96 struct ifnet *ndev; 97 }; 98 99 enum rdma_node_type { 100 /* IB values map to NodeInfo:NodeType. */ 101 RDMA_NODE_IB_CA = 1, 102 RDMA_NODE_IB_SWITCH, 103 RDMA_NODE_IB_ROUTER, 104 RDMA_NODE_RNIC, 105 RDMA_NODE_USNIC, 106 RDMA_NODE_USNIC_UDP, 107 }; 108 109 enum { 110 /* set the local administered indication */ 111 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2, 112 }; 113 114 enum rdma_transport_type { 115 RDMA_TRANSPORT_IB, 116 RDMA_TRANSPORT_IWARP, 117 RDMA_TRANSPORT_USNIC, 118 RDMA_TRANSPORT_USNIC_UDP 119 }; 120 121 enum rdma_protocol_type { 122 RDMA_PROTOCOL_IB, 123 RDMA_PROTOCOL_IBOE, 124 RDMA_PROTOCOL_IWARP, 125 RDMA_PROTOCOL_USNIC_UDP 126 }; 127 128 __attribute_const__ enum rdma_transport_type 129 rdma_node_get_transport(enum rdma_node_type node_type); 130 131 enum rdma_network_type { 132 RDMA_NETWORK_IB, 133 RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB, 134 RDMA_NETWORK_IPV4, 135 RDMA_NETWORK_IPV6 136 }; 137 138 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type) 139 { 140 if (network_type == RDMA_NETWORK_IPV4 || 141 network_type == RDMA_NETWORK_IPV6) 142 return IB_GID_TYPE_ROCE_UDP_ENCAP; 143 144 /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */ 145 return IB_GID_TYPE_IB; 146 } 147 148 static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type, 149 union ib_gid *gid) 150 { 151 if (gid_type == IB_GID_TYPE_IB) 152 return RDMA_NETWORK_IB; 153 154 if (ipv6_addr_v4mapped((struct in6_addr *)gid)) 155 return RDMA_NETWORK_IPV4; 156 else 157 return RDMA_NETWORK_IPV6; 158 } 159 160 enum rdma_link_layer { 161 IB_LINK_LAYER_UNSPECIFIED, 162 IB_LINK_LAYER_INFINIBAND, 163 IB_LINK_LAYER_ETHERNET, 164 }; 165 166 enum ib_device_cap_flags { 167 IB_DEVICE_RESIZE_MAX_WR = (1 << 0), 168 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1), 169 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2), 170 IB_DEVICE_RAW_MULTI = (1 << 3), 171 IB_DEVICE_AUTO_PATH_MIG = (1 << 4), 172 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5), 173 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6), 174 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7), 175 IB_DEVICE_SHUTDOWN_PORT = (1 << 8), 176 IB_DEVICE_INIT_TYPE = (1 << 9), 177 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10), 178 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11), 179 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12), 180 IB_DEVICE_SRQ_RESIZE = (1 << 13), 181 IB_DEVICE_N_NOTIFY_CQ = (1 << 14), 182 183 /* 184 * This device supports a per-device lkey or stag that can be 185 * used without performing a memory registration for the local 186 * memory. Note that ULPs should never check this flag, but 187 * instead of use the local_dma_lkey flag in the ib_pd structure, 188 * which will always contain a usable lkey. 189 */ 190 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15), 191 IB_DEVICE_RESERVED /* old SEND_W_INV */ = (1 << 16), 192 IB_DEVICE_MEM_WINDOW = (1 << 17), 193 /* 194 * Devices should set IB_DEVICE_UD_IP_SUM if they support 195 * insertion of UDP and TCP checksum on outgoing UD IPoIB 196 * messages and can verify the validity of checksum for 197 * incoming messages. Setting this flag implies that the 198 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. 199 */ 200 IB_DEVICE_UD_IP_CSUM = (1 << 18), 201 IB_DEVICE_UD_TSO = (1 << 19), 202 IB_DEVICE_XRC = (1 << 20), 203 204 /* 205 * This device supports the IB "base memory management extension", 206 * which includes support for fast registrations (IB_WR_REG_MR, 207 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should 208 * also be set by any iWarp device which must support FRs to comply 209 * to the iWarp verbs spec. iWarp devices also support the 210 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the 211 * stag. 212 */ 213 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21), 214 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22), 215 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23), 216 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24), 217 IB_DEVICE_RC_IP_CSUM = (1 << 25), 218 IB_DEVICE_RAW_IP_CSUM = (1 << 26), 219 /* 220 * Devices should set IB_DEVICE_CROSS_CHANNEL if they 221 * support execution of WQEs that involve synchronization 222 * of I/O operations with single completion queue managed 223 * by hardware. 224 */ 225 IB_DEVICE_CROSS_CHANNEL = (1 << 27), 226 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29), 227 IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30), 228 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31), 229 IB_DEVICE_SG_GAPS_REG = (1ULL << 32), 230 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33), 231 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34), 232 }; 233 234 enum ib_signature_prot_cap { 235 IB_PROT_T10DIF_TYPE_1 = 1, 236 IB_PROT_T10DIF_TYPE_2 = 1 << 1, 237 IB_PROT_T10DIF_TYPE_3 = 1 << 2, 238 }; 239 240 enum ib_signature_guard_cap { 241 IB_GUARD_T10DIF_CRC = 1, 242 IB_GUARD_T10DIF_CSUM = 1 << 1, 243 }; 244 245 enum ib_atomic_cap { 246 IB_ATOMIC_NONE, 247 IB_ATOMIC_HCA, 248 IB_ATOMIC_GLOB 249 }; 250 251 enum ib_odp_general_cap_bits { 252 IB_ODP_SUPPORT = 1 << 0, 253 }; 254 255 enum ib_odp_transport_cap_bits { 256 IB_ODP_SUPPORT_SEND = 1 << 0, 257 IB_ODP_SUPPORT_RECV = 1 << 1, 258 IB_ODP_SUPPORT_WRITE = 1 << 2, 259 IB_ODP_SUPPORT_READ = 1 << 3, 260 IB_ODP_SUPPORT_ATOMIC = 1 << 4, 261 }; 262 263 struct ib_odp_caps { 264 uint64_t general_caps; 265 struct { 266 uint32_t rc_odp_caps; 267 uint32_t uc_odp_caps; 268 uint32_t ud_odp_caps; 269 } per_transport_caps; 270 }; 271 272 struct ib_rss_caps { 273 /* Corresponding bit will be set if qp type from 274 * 'enum ib_qp_type' is supported, e.g. 275 * supported_qpts |= 1 << IB_QPT_UD 276 */ 277 u32 supported_qpts; 278 u32 max_rwq_indirection_tables; 279 u32 max_rwq_indirection_table_size; 280 }; 281 282 enum ib_cq_creation_flags { 283 IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0, 284 IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1, 285 }; 286 287 struct ib_cq_init_attr { 288 unsigned int cqe; 289 u32 comp_vector; 290 u32 flags; 291 }; 292 293 struct ib_device_attr { 294 u64 fw_ver; 295 __be64 sys_image_guid; 296 u64 max_mr_size; 297 u64 page_size_cap; 298 u32 vendor_id; 299 u32 vendor_part_id; 300 u32 hw_ver; 301 int max_qp; 302 int max_qp_wr; 303 u64 device_cap_flags; 304 int max_sge; 305 int max_sge_rd; 306 int max_cq; 307 int max_cqe; 308 int max_mr; 309 int max_pd; 310 int max_qp_rd_atom; 311 int max_ee_rd_atom; 312 int max_res_rd_atom; 313 int max_qp_init_rd_atom; 314 int max_ee_init_rd_atom; 315 enum ib_atomic_cap atomic_cap; 316 enum ib_atomic_cap masked_atomic_cap; 317 int max_ee; 318 int max_rdd; 319 int max_mw; 320 int max_raw_ipv6_qp; 321 int max_raw_ethy_qp; 322 int max_mcast_grp; 323 int max_mcast_qp_attach; 324 int max_total_mcast_qp_attach; 325 int max_ah; 326 int max_fmr; 327 int max_map_per_fmr; 328 int max_srq; 329 int max_srq_wr; 330 int max_srq_sge; 331 unsigned int max_fast_reg_page_list_len; 332 u16 max_pkeys; 333 u8 local_ca_ack_delay; 334 int sig_prot_cap; 335 int sig_guard_cap; 336 struct ib_odp_caps odp_caps; 337 uint64_t timestamp_mask; 338 uint64_t hca_core_clock; /* in KHZ */ 339 struct ib_rss_caps rss_caps; 340 u32 max_wq_type_rq; 341 }; 342 343 enum ib_mtu { 344 IB_MTU_256 = 1, 345 IB_MTU_512 = 2, 346 IB_MTU_1024 = 3, 347 IB_MTU_2048 = 4, 348 IB_MTU_4096 = 5 349 }; 350 351 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu) 352 { 353 switch (mtu) { 354 case IB_MTU_256: return 256; 355 case IB_MTU_512: return 512; 356 case IB_MTU_1024: return 1024; 357 case IB_MTU_2048: return 2048; 358 case IB_MTU_4096: return 4096; 359 default: return -1; 360 } 361 } 362 363 enum ib_port_state { 364 IB_PORT_NOP = 0, 365 IB_PORT_DOWN = 1, 366 IB_PORT_INIT = 2, 367 IB_PORT_ARMED = 3, 368 IB_PORT_ACTIVE = 4, 369 IB_PORT_ACTIVE_DEFER = 5, 370 IB_PORT_DUMMY = -1, /* force enum signed */ 371 }; 372 373 enum ib_port_cap_flags { 374 IB_PORT_SM = 1 << 1, 375 IB_PORT_NOTICE_SUP = 1 << 2, 376 IB_PORT_TRAP_SUP = 1 << 3, 377 IB_PORT_OPT_IPD_SUP = 1 << 4, 378 IB_PORT_AUTO_MIGR_SUP = 1 << 5, 379 IB_PORT_SL_MAP_SUP = 1 << 6, 380 IB_PORT_MKEY_NVRAM = 1 << 7, 381 IB_PORT_PKEY_NVRAM = 1 << 8, 382 IB_PORT_LED_INFO_SUP = 1 << 9, 383 IB_PORT_SM_DISABLED = 1 << 10, 384 IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11, 385 IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12, 386 IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14, 387 IB_PORT_CM_SUP = 1 << 16, 388 IB_PORT_SNMP_TUNNEL_SUP = 1 << 17, 389 IB_PORT_REINIT_SUP = 1 << 18, 390 IB_PORT_DEVICE_MGMT_SUP = 1 << 19, 391 IB_PORT_VENDOR_CLASS_SUP = 1 << 20, 392 IB_PORT_DR_NOTICE_SUP = 1 << 21, 393 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22, 394 IB_PORT_BOOT_MGMT_SUP = 1 << 23, 395 IB_PORT_LINK_LATENCY_SUP = 1 << 24, 396 IB_PORT_CLIENT_REG_SUP = 1 << 25, 397 IB_PORT_IP_BASED_GIDS = 1 << 26, 398 }; 399 400 enum ib_port_phys_state { 401 IB_PORT_PHYS_STATE_SLEEP = 1, 402 IB_PORT_PHYS_STATE_POLLING = 2, 403 IB_PORT_PHYS_STATE_DISABLED = 3, 404 IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, 405 IB_PORT_PHYS_STATE_LINK_UP = 5, 406 IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, 407 IB_PORT_PHYS_STATE_PHY_TEST = 7, 408 }; 409 410 enum ib_port_width { 411 IB_WIDTH_1X = 1, 412 IB_WIDTH_2X = 16, 413 IB_WIDTH_4X = 2, 414 IB_WIDTH_8X = 4, 415 IB_WIDTH_12X = 8 416 }; 417 418 static inline int ib_width_enum_to_int(enum ib_port_width width) 419 { 420 switch (width) { 421 case IB_WIDTH_1X: return 1; 422 case IB_WIDTH_2X: return 2; 423 case IB_WIDTH_4X: return 4; 424 case IB_WIDTH_8X: return 8; 425 case IB_WIDTH_12X: return 12; 426 default: return -1; 427 } 428 } 429 430 enum ib_port_speed { 431 IB_SPEED_SDR = 1, 432 IB_SPEED_DDR = 2, 433 IB_SPEED_QDR = 4, 434 IB_SPEED_FDR10 = 8, 435 IB_SPEED_FDR = 16, 436 IB_SPEED_EDR = 32, 437 IB_SPEED_HDR = 64 438 }; 439 440 /** 441 * struct rdma_hw_stats 442 * @lock - Mutex to protect parallel write access to lifespan and values 443 * of counters, which are 64bits and not guaranteeed to be written 444 * atomicaly on 32bits systems. 445 * @timestamp - Used by the core code to track when the last update was 446 * @lifespan - Used by the core code to determine how old the counters 447 * should be before being updated again. Stored in jiffies, defaults 448 * to 10 milliseconds, drivers can override the default be specifying 449 * their own value during their allocation routine. 450 * @name - Array of pointers to static names used for the counters in 451 * directory. 452 * @num_counters - How many hardware counters there are. If name is 453 * shorter than this number, a kernel oops will result. Driver authors 454 * are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters) 455 * in their code to prevent this. 456 * @value - Array of u64 counters that are accessed by the sysfs code and 457 * filled in by the drivers get_stats routine 458 */ 459 struct rdma_hw_stats { 460 struct mutex lock; /* Protect lifespan and values[] */ 461 unsigned long timestamp; 462 unsigned long lifespan; 463 const char * const *names; 464 int num_counters; 465 u64 value[]; 466 }; 467 468 #define RDMA_HW_STATS_DEFAULT_LIFESPAN 10 469 /** 470 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct 471 * for drivers. 472 * @names - Array of static const char * 473 * @num_counters - How many elements in array 474 * @lifespan - How many milliseconds between updates 475 */ 476 static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct( 477 const char * const *names, int num_counters, 478 unsigned long lifespan) 479 { 480 struct rdma_hw_stats *stats; 481 482 stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64), 483 GFP_KERNEL); 484 if (!stats) 485 return NULL; 486 stats->names = names; 487 stats->num_counters = num_counters; 488 stats->lifespan = msecs_to_jiffies(lifespan); 489 490 return stats; 491 } 492 493 494 /* Define bits for the various functionality this port needs to be supported by 495 * the core. 496 */ 497 /* Management 0x00000FFF */ 498 #define RDMA_CORE_CAP_IB_MAD 0x00000001 499 #define RDMA_CORE_CAP_IB_SMI 0x00000002 500 #define RDMA_CORE_CAP_IB_CM 0x00000004 501 #define RDMA_CORE_CAP_IW_CM 0x00000008 502 #define RDMA_CORE_CAP_IB_SA 0x00000010 503 #define RDMA_CORE_CAP_OPA_MAD 0x00000020 504 505 /* Address format 0x000FF000 */ 506 #define RDMA_CORE_CAP_AF_IB 0x00001000 507 #define RDMA_CORE_CAP_ETH_AH 0x00002000 508 509 /* Protocol 0xFFF00000 */ 510 #define RDMA_CORE_CAP_PROT_IB 0x00100000 511 #define RDMA_CORE_CAP_PROT_ROCE 0x00200000 512 #define RDMA_CORE_CAP_PROT_IWARP 0x00400000 513 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000 514 515 #define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \ 516 | RDMA_CORE_CAP_IB_MAD \ 517 | RDMA_CORE_CAP_IB_SMI \ 518 | RDMA_CORE_CAP_IB_CM \ 519 | RDMA_CORE_CAP_IB_SA \ 520 | RDMA_CORE_CAP_AF_IB) 521 #define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \ 522 | RDMA_CORE_CAP_IB_MAD \ 523 | RDMA_CORE_CAP_IB_CM \ 524 | RDMA_CORE_CAP_AF_IB \ 525 | RDMA_CORE_CAP_ETH_AH) 526 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \ 527 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \ 528 | RDMA_CORE_CAP_IB_MAD \ 529 | RDMA_CORE_CAP_IB_CM \ 530 | RDMA_CORE_CAP_AF_IB \ 531 | RDMA_CORE_CAP_ETH_AH) 532 #define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \ 533 | RDMA_CORE_CAP_IW_CM) 534 #define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \ 535 | RDMA_CORE_CAP_OPA_MAD) 536 537 struct ib_port_attr { 538 u64 subnet_prefix; 539 enum ib_port_state state; 540 enum ib_mtu max_mtu; 541 enum ib_mtu active_mtu; 542 int gid_tbl_len; 543 u32 port_cap_flags; 544 u32 max_msg_sz; 545 u32 bad_pkey_cntr; 546 u32 qkey_viol_cntr; 547 u16 pkey_tbl_len; 548 u16 lid; 549 u16 sm_lid; 550 u8 lmc; 551 u8 max_vl_num; 552 u8 sm_sl; 553 u8 subnet_timeout; 554 u8 init_type_reply; 555 u8 active_width; 556 u8 active_speed; 557 u8 phys_state; 558 bool grh_required; 559 }; 560 561 enum ib_device_modify_flags { 562 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0, 563 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1 564 }; 565 566 #define IB_DEVICE_NODE_DESC_MAX 64 567 568 struct ib_device_modify { 569 u64 sys_image_guid; 570 char node_desc[IB_DEVICE_NODE_DESC_MAX]; 571 }; 572 573 enum ib_port_modify_flags { 574 IB_PORT_SHUTDOWN = 1, 575 IB_PORT_INIT_TYPE = (1<<2), 576 IB_PORT_RESET_QKEY_CNTR = (1<<3) 577 }; 578 579 struct ib_port_modify { 580 u32 set_port_cap_mask; 581 u32 clr_port_cap_mask; 582 u8 init_type; 583 }; 584 585 enum ib_event_type { 586 IB_EVENT_CQ_ERR, 587 IB_EVENT_QP_FATAL, 588 IB_EVENT_QP_REQ_ERR, 589 IB_EVENT_QP_ACCESS_ERR, 590 IB_EVENT_COMM_EST, 591 IB_EVENT_SQ_DRAINED, 592 IB_EVENT_PATH_MIG, 593 IB_EVENT_PATH_MIG_ERR, 594 IB_EVENT_DEVICE_FATAL, 595 IB_EVENT_PORT_ACTIVE, 596 IB_EVENT_PORT_ERR, 597 IB_EVENT_LID_CHANGE, 598 IB_EVENT_PKEY_CHANGE, 599 IB_EVENT_SM_CHANGE, 600 IB_EVENT_SRQ_ERR, 601 IB_EVENT_SRQ_LIMIT_REACHED, 602 IB_EVENT_QP_LAST_WQE_REACHED, 603 IB_EVENT_CLIENT_REREGISTER, 604 IB_EVENT_GID_CHANGE, 605 IB_EVENT_WQ_FATAL, 606 }; 607 608 const char *__attribute_const__ ib_event_msg(enum ib_event_type event); 609 610 struct ib_event { 611 struct ib_device *device; 612 union { 613 struct ib_cq *cq; 614 struct ib_qp *qp; 615 struct ib_srq *srq; 616 struct ib_wq *wq; 617 u8 port_num; 618 } element; 619 enum ib_event_type event; 620 }; 621 622 struct ib_event_handler { 623 struct ib_device *device; 624 void (*handler)(struct ib_event_handler *, struct ib_event *); 625 struct list_head list; 626 }; 627 628 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \ 629 do { \ 630 (_ptr)->device = _device; \ 631 (_ptr)->handler = _handler; \ 632 INIT_LIST_HEAD(&(_ptr)->list); \ 633 } while (0) 634 635 struct ib_global_route { 636 union ib_gid dgid; 637 u32 flow_label; 638 u8 sgid_index; 639 u8 hop_limit; 640 u8 traffic_class; 641 }; 642 643 struct ib_grh { 644 __be32 version_tclass_flow; 645 __be16 paylen; 646 u8 next_hdr; 647 u8 hop_limit; 648 union ib_gid sgid; 649 union ib_gid dgid; 650 }; 651 652 union rdma_network_hdr { 653 struct ib_grh ibgrh; 654 struct { 655 /* The IB spec states that if it's IPv4, the header 656 * is located in the last 20 bytes of the header. 657 */ 658 u8 reserved[20]; 659 struct ip roce4grh; 660 }; 661 }; 662 663 enum { 664 IB_MULTICAST_QPN = 0xffffff 665 }; 666 667 #define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF) 668 #define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000) 669 670 enum ib_ah_flags { 671 IB_AH_GRH = 1 672 }; 673 674 enum ib_rate { 675 IB_RATE_PORT_CURRENT = 0, 676 IB_RATE_2_5_GBPS = 2, 677 IB_RATE_5_GBPS = 5, 678 IB_RATE_10_GBPS = 3, 679 IB_RATE_20_GBPS = 6, 680 IB_RATE_30_GBPS = 4, 681 IB_RATE_40_GBPS = 7, 682 IB_RATE_60_GBPS = 8, 683 IB_RATE_80_GBPS = 9, 684 IB_RATE_120_GBPS = 10, 685 IB_RATE_14_GBPS = 11, 686 IB_RATE_56_GBPS = 12, 687 IB_RATE_112_GBPS = 13, 688 IB_RATE_168_GBPS = 14, 689 IB_RATE_25_GBPS = 15, 690 IB_RATE_100_GBPS = 16, 691 IB_RATE_200_GBPS = 17, 692 IB_RATE_300_GBPS = 18, 693 IB_RATE_28_GBPS = 19, 694 IB_RATE_50_GBPS = 20, 695 IB_RATE_400_GBPS = 21, 696 IB_RATE_600_GBPS = 22, 697 }; 698 699 /** 700 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the 701 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be 702 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec. 703 * @rate: rate to convert. 704 */ 705 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate); 706 707 /** 708 * ib_rate_to_mbps - Convert the IB rate enum to Mbps. 709 * For example, IB_RATE_2_5_GBPS will be converted to 2500. 710 * @rate: rate to convert. 711 */ 712 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate); 713 714 715 /** 716 * enum ib_mr_type - memory region type 717 * @IB_MR_TYPE_MEM_REG: memory region that is used for 718 * normal registration 719 * @IB_MR_TYPE_SIGNATURE: memory region that is used for 720 * signature operations (data-integrity 721 * capable regions) 722 * @IB_MR_TYPE_SG_GAPS: memory region that is capable to 723 * register any arbitrary sg lists (without 724 * the normal mr constraints - see 725 * ib_map_mr_sg) 726 */ 727 enum ib_mr_type { 728 IB_MR_TYPE_MEM_REG, 729 IB_MR_TYPE_SIGNATURE, 730 IB_MR_TYPE_SG_GAPS, 731 }; 732 733 /** 734 * Signature types 735 * IB_SIG_TYPE_NONE: Unprotected. 736 * IB_SIG_TYPE_T10_DIF: Type T10-DIF 737 */ 738 enum ib_signature_type { 739 IB_SIG_TYPE_NONE, 740 IB_SIG_TYPE_T10_DIF, 741 }; 742 743 /** 744 * Signature T10-DIF block-guard types 745 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules. 746 * IB_T10DIF_CSUM: Corresponds to IP checksum rules. 747 */ 748 enum ib_t10_dif_bg_type { 749 IB_T10DIF_CRC, 750 IB_T10DIF_CSUM 751 }; 752 753 /** 754 * struct ib_t10_dif_domain - Parameters specific for T10-DIF 755 * domain. 756 * @bg_type: T10-DIF block guard type (CRC|CSUM) 757 * @pi_interval: protection information interval. 758 * @bg: seed of guard computation. 759 * @app_tag: application tag of guard block 760 * @ref_tag: initial guard block reference tag. 761 * @ref_remap: Indicate wethear the reftag increments each block 762 * @app_escape: Indicate to skip block check if apptag=0xffff 763 * @ref_escape: Indicate to skip block check if reftag=0xffffffff 764 * @apptag_check_mask: check bitmask of application tag. 765 */ 766 struct ib_t10_dif_domain { 767 enum ib_t10_dif_bg_type bg_type; 768 u16 pi_interval; 769 u16 bg; 770 u16 app_tag; 771 u32 ref_tag; 772 bool ref_remap; 773 bool app_escape; 774 bool ref_escape; 775 u16 apptag_check_mask; 776 }; 777 778 /** 779 * struct ib_sig_domain - Parameters for signature domain 780 * @sig_type: specific signauture type 781 * @sig: union of all signature domain attributes that may 782 * be used to set domain layout. 783 */ 784 struct ib_sig_domain { 785 enum ib_signature_type sig_type; 786 union { 787 struct ib_t10_dif_domain dif; 788 } sig; 789 }; 790 791 /** 792 * struct ib_sig_attrs - Parameters for signature handover operation 793 * @check_mask: bitmask for signature byte check (8 bytes) 794 * @mem: memory domain layout desciptor. 795 * @wire: wire domain layout desciptor. 796 */ 797 struct ib_sig_attrs { 798 u8 check_mask; 799 struct ib_sig_domain mem; 800 struct ib_sig_domain wire; 801 }; 802 803 enum ib_sig_err_type { 804 IB_SIG_BAD_GUARD, 805 IB_SIG_BAD_REFTAG, 806 IB_SIG_BAD_APPTAG, 807 }; 808 809 /** 810 * struct ib_sig_err - signature error descriptor 811 */ 812 struct ib_sig_err { 813 enum ib_sig_err_type err_type; 814 u32 expected; 815 u32 actual; 816 u64 sig_err_offset; 817 u32 key; 818 }; 819 820 enum ib_mr_status_check { 821 IB_MR_CHECK_SIG_STATUS = 1, 822 }; 823 824 /** 825 * struct ib_mr_status - Memory region status container 826 * 827 * @fail_status: Bitmask of MR checks status. For each 828 * failed check a corresponding status bit is set. 829 * @sig_err: Additional info for IB_MR_CEHCK_SIG_STATUS 830 * failure. 831 */ 832 struct ib_mr_status { 833 u32 fail_status; 834 struct ib_sig_err sig_err; 835 }; 836 837 /** 838 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate 839 * enum. 840 * @mult: multiple to convert. 841 */ 842 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult); 843 844 struct ib_ah_attr { 845 struct ib_global_route grh; 846 u16 dlid; 847 u8 sl; 848 u8 src_path_bits; 849 u8 static_rate; 850 u8 ah_flags; 851 u8 port_num; 852 u8 dmac[ETH_ALEN]; 853 }; 854 855 enum ib_wc_status { 856 IB_WC_SUCCESS, 857 IB_WC_LOC_LEN_ERR, 858 IB_WC_LOC_QP_OP_ERR, 859 IB_WC_LOC_EEC_OP_ERR, 860 IB_WC_LOC_PROT_ERR, 861 IB_WC_WR_FLUSH_ERR, 862 IB_WC_MW_BIND_ERR, 863 IB_WC_BAD_RESP_ERR, 864 IB_WC_LOC_ACCESS_ERR, 865 IB_WC_REM_INV_REQ_ERR, 866 IB_WC_REM_ACCESS_ERR, 867 IB_WC_REM_OP_ERR, 868 IB_WC_RETRY_EXC_ERR, 869 IB_WC_RNR_RETRY_EXC_ERR, 870 IB_WC_LOC_RDD_VIOL_ERR, 871 IB_WC_REM_INV_RD_REQ_ERR, 872 IB_WC_REM_ABORT_ERR, 873 IB_WC_INV_EECN_ERR, 874 IB_WC_INV_EEC_STATE_ERR, 875 IB_WC_FATAL_ERR, 876 IB_WC_RESP_TIMEOUT_ERR, 877 IB_WC_GENERAL_ERR 878 }; 879 880 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status); 881 882 enum ib_wc_opcode { 883 IB_WC_SEND, 884 IB_WC_RDMA_WRITE, 885 IB_WC_RDMA_READ, 886 IB_WC_COMP_SWAP, 887 IB_WC_FETCH_ADD, 888 IB_WC_LSO, 889 IB_WC_LOCAL_INV, 890 IB_WC_REG_MR, 891 IB_WC_MASKED_COMP_SWAP, 892 IB_WC_MASKED_FETCH_ADD, 893 /* 894 * Set value of IB_WC_RECV so consumers can test if a completion is a 895 * receive by testing (opcode & IB_WC_RECV). 896 */ 897 IB_WC_RECV = 1 << 7, 898 IB_WC_RECV_RDMA_WITH_IMM, 899 IB_WC_DUMMY = -1, /* force enum signed */ 900 }; 901 902 enum ib_wc_flags { 903 IB_WC_GRH = 1, 904 IB_WC_WITH_IMM = (1<<1), 905 IB_WC_WITH_INVALIDATE = (1<<2), 906 IB_WC_IP_CSUM_OK = (1<<3), 907 IB_WC_WITH_SMAC = (1<<4), 908 IB_WC_WITH_VLAN = (1<<5), 909 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6), 910 }; 911 912 struct ib_wc { 913 union { 914 u64 wr_id; 915 struct ib_cqe *wr_cqe; 916 }; 917 enum ib_wc_status status; 918 enum ib_wc_opcode opcode; 919 u32 vendor_err; 920 u32 byte_len; 921 struct ib_qp *qp; 922 union { 923 __be32 imm_data; 924 u32 invalidate_rkey; 925 } ex; 926 u32 src_qp; 927 int wc_flags; 928 u16 pkey_index; 929 u16 slid; 930 u8 sl; 931 u8 dlid_path_bits; 932 u8 port_num; /* valid only for DR SMPs on switches */ 933 u8 smac[ETH_ALEN]; 934 u16 vlan_id; 935 u8 network_hdr_type; 936 }; 937 938 enum ib_cq_notify_flags { 939 IB_CQ_SOLICITED = 1 << 0, 940 IB_CQ_NEXT_COMP = 1 << 1, 941 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP, 942 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2, 943 }; 944 945 enum ib_srq_type { 946 IB_SRQT_BASIC, 947 IB_SRQT_XRC 948 }; 949 950 enum ib_srq_attr_mask { 951 IB_SRQ_MAX_WR = 1 << 0, 952 IB_SRQ_LIMIT = 1 << 1, 953 }; 954 955 struct ib_srq_attr { 956 u32 max_wr; 957 u32 max_sge; 958 u32 srq_limit; 959 }; 960 961 struct ib_srq_init_attr { 962 void (*event_handler)(struct ib_event *, void *); 963 void *srq_context; 964 struct ib_srq_attr attr; 965 enum ib_srq_type srq_type; 966 967 union { 968 struct { 969 struct ib_xrcd *xrcd; 970 struct ib_cq *cq; 971 } xrc; 972 } ext; 973 }; 974 975 struct ib_qp_cap { 976 u32 max_send_wr; 977 u32 max_recv_wr; 978 u32 max_send_sge; 979 u32 max_recv_sge; 980 u32 max_inline_data; 981 982 /* 983 * Maximum number of rdma_rw_ctx structures in flight at a time. 984 * ib_create_qp() will calculate the right amount of neededed WRs 985 * and MRs based on this. 986 */ 987 u32 max_rdma_ctxs; 988 }; 989 990 enum ib_sig_type { 991 IB_SIGNAL_ALL_WR, 992 IB_SIGNAL_REQ_WR 993 }; 994 995 enum ib_qp_type { 996 /* 997 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries 998 * here (and in that order) since the MAD layer uses them as 999 * indices into a 2-entry table. 1000 */ 1001 IB_QPT_SMI, 1002 IB_QPT_GSI, 1003 1004 IB_QPT_RC, 1005 IB_QPT_UC, 1006 IB_QPT_UD, 1007 IB_QPT_RAW_IPV6, 1008 IB_QPT_RAW_ETHERTYPE, 1009 IB_QPT_RAW_PACKET = 8, 1010 IB_QPT_XRC_INI = 9, 1011 IB_QPT_XRC_TGT, 1012 IB_QPT_MAX, 1013 /* Reserve a range for qp types internal to the low level driver. 1014 * These qp types will not be visible at the IB core layer, so the 1015 * IB_QPT_MAX usages should not be affected in the core layer 1016 */ 1017 IB_QPT_RESERVED1 = 0x1000, 1018 IB_QPT_RESERVED2, 1019 IB_QPT_RESERVED3, 1020 IB_QPT_RESERVED4, 1021 IB_QPT_RESERVED5, 1022 IB_QPT_RESERVED6, 1023 IB_QPT_RESERVED7, 1024 IB_QPT_RESERVED8, 1025 IB_QPT_RESERVED9, 1026 IB_QPT_RESERVED10, 1027 }; 1028 1029 enum ib_qp_create_flags { 1030 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0, 1031 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, 1032 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2, 1033 IB_QP_CREATE_MANAGED_SEND = 1 << 3, 1034 IB_QP_CREATE_MANAGED_RECV = 1 << 4, 1035 IB_QP_CREATE_NETIF_QP = 1 << 5, 1036 IB_QP_CREATE_SIGNATURE_EN = 1 << 6, 1037 IB_QP_CREATE_USE_GFP_NOIO = 1 << 7, 1038 IB_QP_CREATE_SCATTER_FCS = 1 << 8, 1039 /* reserve bits 26-31 for low level drivers' internal use */ 1040 IB_QP_CREATE_RESERVED_START = 1 << 26, 1041 IB_QP_CREATE_RESERVED_END = 1 << 31, 1042 }; 1043 1044 /* 1045 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler 1046 * callback to destroy the passed in QP. 1047 */ 1048 1049 struct ib_qp_init_attr { 1050 void (*event_handler)(struct ib_event *, void *); 1051 void *qp_context; 1052 struct ib_cq *send_cq; 1053 struct ib_cq *recv_cq; 1054 struct ib_srq *srq; 1055 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 1056 struct ib_qp_cap cap; 1057 enum ib_sig_type sq_sig_type; 1058 enum ib_qp_type qp_type; 1059 enum ib_qp_create_flags create_flags; 1060 1061 /* 1062 * Only needed for special QP types, or when using the RW API. 1063 */ 1064 u8 port_num; 1065 struct ib_rwq_ind_table *rwq_ind_tbl; 1066 }; 1067 1068 struct ib_qp_open_attr { 1069 void (*event_handler)(struct ib_event *, void *); 1070 void *qp_context; 1071 u32 qp_num; 1072 enum ib_qp_type qp_type; 1073 }; 1074 1075 enum ib_rnr_timeout { 1076 IB_RNR_TIMER_655_36 = 0, 1077 IB_RNR_TIMER_000_01 = 1, 1078 IB_RNR_TIMER_000_02 = 2, 1079 IB_RNR_TIMER_000_03 = 3, 1080 IB_RNR_TIMER_000_04 = 4, 1081 IB_RNR_TIMER_000_06 = 5, 1082 IB_RNR_TIMER_000_08 = 6, 1083 IB_RNR_TIMER_000_12 = 7, 1084 IB_RNR_TIMER_000_16 = 8, 1085 IB_RNR_TIMER_000_24 = 9, 1086 IB_RNR_TIMER_000_32 = 10, 1087 IB_RNR_TIMER_000_48 = 11, 1088 IB_RNR_TIMER_000_64 = 12, 1089 IB_RNR_TIMER_000_96 = 13, 1090 IB_RNR_TIMER_001_28 = 14, 1091 IB_RNR_TIMER_001_92 = 15, 1092 IB_RNR_TIMER_002_56 = 16, 1093 IB_RNR_TIMER_003_84 = 17, 1094 IB_RNR_TIMER_005_12 = 18, 1095 IB_RNR_TIMER_007_68 = 19, 1096 IB_RNR_TIMER_010_24 = 20, 1097 IB_RNR_TIMER_015_36 = 21, 1098 IB_RNR_TIMER_020_48 = 22, 1099 IB_RNR_TIMER_030_72 = 23, 1100 IB_RNR_TIMER_040_96 = 24, 1101 IB_RNR_TIMER_061_44 = 25, 1102 IB_RNR_TIMER_081_92 = 26, 1103 IB_RNR_TIMER_122_88 = 27, 1104 IB_RNR_TIMER_163_84 = 28, 1105 IB_RNR_TIMER_245_76 = 29, 1106 IB_RNR_TIMER_327_68 = 30, 1107 IB_RNR_TIMER_491_52 = 31 1108 }; 1109 1110 enum ib_qp_attr_mask { 1111 IB_QP_STATE = 1, 1112 IB_QP_CUR_STATE = (1<<1), 1113 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2), 1114 IB_QP_ACCESS_FLAGS = (1<<3), 1115 IB_QP_PKEY_INDEX = (1<<4), 1116 IB_QP_PORT = (1<<5), 1117 IB_QP_QKEY = (1<<6), 1118 IB_QP_AV = (1<<7), 1119 IB_QP_PATH_MTU = (1<<8), 1120 IB_QP_TIMEOUT = (1<<9), 1121 IB_QP_RETRY_CNT = (1<<10), 1122 IB_QP_RNR_RETRY = (1<<11), 1123 IB_QP_RQ_PSN = (1<<12), 1124 IB_QP_MAX_QP_RD_ATOMIC = (1<<13), 1125 IB_QP_ALT_PATH = (1<<14), 1126 IB_QP_MIN_RNR_TIMER = (1<<15), 1127 IB_QP_SQ_PSN = (1<<16), 1128 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17), 1129 IB_QP_PATH_MIG_STATE = (1<<18), 1130 IB_QP_CAP = (1<<19), 1131 IB_QP_DEST_QPN = (1<<20), 1132 IB_QP_RESERVED1 = (1<<21), 1133 IB_QP_RESERVED2 = (1<<22), 1134 IB_QP_RESERVED3 = (1<<23), 1135 IB_QP_RESERVED4 = (1<<24), 1136 IB_QP_RATE_LIMIT = (1<<25), 1137 }; 1138 1139 enum ib_qp_state { 1140 IB_QPS_RESET, 1141 IB_QPS_INIT, 1142 IB_QPS_RTR, 1143 IB_QPS_RTS, 1144 IB_QPS_SQD, 1145 IB_QPS_SQE, 1146 IB_QPS_ERR, 1147 IB_QPS_DUMMY = -1, /* force enum signed */ 1148 }; 1149 1150 enum ib_mig_state { 1151 IB_MIG_MIGRATED, 1152 IB_MIG_REARM, 1153 IB_MIG_ARMED 1154 }; 1155 1156 enum ib_mw_type { 1157 IB_MW_TYPE_1 = 1, 1158 IB_MW_TYPE_2 = 2 1159 }; 1160 1161 struct ib_qp_attr { 1162 enum ib_qp_state qp_state; 1163 enum ib_qp_state cur_qp_state; 1164 enum ib_mtu path_mtu; 1165 enum ib_mig_state path_mig_state; 1166 u32 qkey; 1167 u32 rq_psn; 1168 u32 sq_psn; 1169 u32 dest_qp_num; 1170 int qp_access_flags; 1171 struct ib_qp_cap cap; 1172 struct ib_ah_attr ah_attr; 1173 struct ib_ah_attr alt_ah_attr; 1174 u16 pkey_index; 1175 u16 alt_pkey_index; 1176 u8 en_sqd_async_notify; 1177 u8 sq_draining; 1178 u8 max_rd_atomic; 1179 u8 max_dest_rd_atomic; 1180 u8 min_rnr_timer; 1181 u8 port_num; 1182 u8 timeout; 1183 u8 retry_cnt; 1184 u8 rnr_retry; 1185 u8 alt_port_num; 1186 u8 alt_timeout; 1187 u32 rate_limit; 1188 }; 1189 1190 enum ib_wr_opcode { 1191 IB_WR_RDMA_WRITE, 1192 IB_WR_RDMA_WRITE_WITH_IMM, 1193 IB_WR_SEND, 1194 IB_WR_SEND_WITH_IMM, 1195 IB_WR_RDMA_READ, 1196 IB_WR_ATOMIC_CMP_AND_SWP, 1197 IB_WR_ATOMIC_FETCH_AND_ADD, 1198 IB_WR_LSO, 1199 IB_WR_SEND_WITH_INV, 1200 IB_WR_RDMA_READ_WITH_INV, 1201 IB_WR_LOCAL_INV, 1202 IB_WR_REG_MR, 1203 IB_WR_MASKED_ATOMIC_CMP_AND_SWP, 1204 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, 1205 IB_WR_REG_SIG_MR, 1206 /* reserve values for low level drivers' internal use. 1207 * These values will not be used at all in the ib core layer. 1208 */ 1209 IB_WR_RESERVED1 = 0xf0, 1210 IB_WR_RESERVED2, 1211 IB_WR_RESERVED3, 1212 IB_WR_RESERVED4, 1213 IB_WR_RESERVED5, 1214 IB_WR_RESERVED6, 1215 IB_WR_RESERVED7, 1216 IB_WR_RESERVED8, 1217 IB_WR_RESERVED9, 1218 IB_WR_RESERVED10, 1219 IB_WR_DUMMY = -1, /* force enum signed */ 1220 }; 1221 1222 enum ib_send_flags { 1223 IB_SEND_FENCE = 1, 1224 IB_SEND_SIGNALED = (1<<1), 1225 IB_SEND_SOLICITED = (1<<2), 1226 IB_SEND_INLINE = (1<<3), 1227 IB_SEND_IP_CSUM = (1<<4), 1228 1229 /* reserve bits 26-31 for low level drivers' internal use */ 1230 IB_SEND_RESERVED_START = (1 << 26), 1231 IB_SEND_RESERVED_END = (1 << 31), 1232 }; 1233 1234 struct ib_sge { 1235 u64 addr; 1236 u32 length; 1237 u32 lkey; 1238 }; 1239 1240 struct ib_cqe { 1241 void (*done)(struct ib_cq *cq, struct ib_wc *wc); 1242 }; 1243 1244 struct ib_send_wr { 1245 struct ib_send_wr *next; 1246 union { 1247 u64 wr_id; 1248 struct ib_cqe *wr_cqe; 1249 }; 1250 struct ib_sge *sg_list; 1251 int num_sge; 1252 enum ib_wr_opcode opcode; 1253 int send_flags; 1254 union { 1255 __be32 imm_data; 1256 u32 invalidate_rkey; 1257 } ex; 1258 }; 1259 1260 struct ib_rdma_wr { 1261 struct ib_send_wr wr; 1262 u64 remote_addr; 1263 u32 rkey; 1264 }; 1265 1266 static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr) 1267 { 1268 return container_of(wr, struct ib_rdma_wr, wr); 1269 } 1270 1271 struct ib_atomic_wr { 1272 struct ib_send_wr wr; 1273 u64 remote_addr; 1274 u64 compare_add; 1275 u64 swap; 1276 u64 compare_add_mask; 1277 u64 swap_mask; 1278 u32 rkey; 1279 }; 1280 1281 static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr) 1282 { 1283 return container_of(wr, struct ib_atomic_wr, wr); 1284 } 1285 1286 struct ib_ud_wr { 1287 struct ib_send_wr wr; 1288 struct ib_ah *ah; 1289 void *header; 1290 int hlen; 1291 int mss; 1292 u32 remote_qpn; 1293 u32 remote_qkey; 1294 u16 pkey_index; /* valid for GSI only */ 1295 u8 port_num; /* valid for DR SMPs on switch only */ 1296 }; 1297 1298 static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr) 1299 { 1300 return container_of(wr, struct ib_ud_wr, wr); 1301 } 1302 1303 struct ib_reg_wr { 1304 struct ib_send_wr wr; 1305 struct ib_mr *mr; 1306 u32 key; 1307 int access; 1308 }; 1309 1310 static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr) 1311 { 1312 return container_of(wr, struct ib_reg_wr, wr); 1313 } 1314 1315 struct ib_sig_handover_wr { 1316 struct ib_send_wr wr; 1317 struct ib_sig_attrs *sig_attrs; 1318 struct ib_mr *sig_mr; 1319 int access_flags; 1320 struct ib_sge *prot; 1321 }; 1322 1323 static inline const struct ib_sig_handover_wr *sig_handover_wr(const struct ib_send_wr *wr) 1324 { 1325 return container_of(wr, struct ib_sig_handover_wr, wr); 1326 } 1327 1328 struct ib_recv_wr { 1329 struct ib_recv_wr *next; 1330 union { 1331 u64 wr_id; 1332 struct ib_cqe *wr_cqe; 1333 }; 1334 struct ib_sge *sg_list; 1335 int num_sge; 1336 }; 1337 1338 enum ib_access_flags { 1339 IB_ACCESS_LOCAL_WRITE = 1, 1340 IB_ACCESS_REMOTE_WRITE = (1<<1), 1341 IB_ACCESS_REMOTE_READ = (1<<2), 1342 IB_ACCESS_REMOTE_ATOMIC = (1<<3), 1343 IB_ACCESS_MW_BIND = (1<<4), 1344 IB_ZERO_BASED = (1<<5), 1345 IB_ACCESS_ON_DEMAND = (1<<6), 1346 }; 1347 1348 /* 1349 * XXX: these are apparently used for ->rereg_user_mr, no idea why they 1350 * are hidden here instead of a uapi header! 1351 */ 1352 enum ib_mr_rereg_flags { 1353 IB_MR_REREG_TRANS = 1, 1354 IB_MR_REREG_PD = (1<<1), 1355 IB_MR_REREG_ACCESS = (1<<2), 1356 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1) 1357 }; 1358 1359 struct ib_fmr_attr { 1360 int max_pages; 1361 int max_maps; 1362 u8 page_shift; 1363 }; 1364 1365 struct ib_umem; 1366 1367 enum rdma_remove_reason { 1368 /* 1369 * Userspace requested uobject deletion or initial try 1370 * to remove uobject via cleanup. Call could fail 1371 */ 1372 RDMA_REMOVE_DESTROY, 1373 /* Context deletion. This call should delete the actual object itself */ 1374 RDMA_REMOVE_CLOSE, 1375 /* Driver is being hot-unplugged. This call should delete the actual object itself */ 1376 RDMA_REMOVE_DRIVER_REMOVE, 1377 /* uobj is being cleaned-up before being committed */ 1378 RDMA_REMOVE_ABORT, 1379 }; 1380 1381 struct ib_ucontext { 1382 struct ib_device *device; 1383 struct list_head pd_list; 1384 struct list_head mr_list; 1385 struct list_head mw_list; 1386 struct list_head cq_list; 1387 struct list_head qp_list; 1388 struct list_head srq_list; 1389 struct list_head ah_list; 1390 struct list_head xrcd_list; 1391 struct list_head rule_list; 1392 struct list_head wq_list; 1393 struct list_head rwq_ind_tbl_list; 1394 int closing; 1395 1396 bool cleanup_retryable; 1397 1398 pid_t tgid; 1399 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1400 struct rb_root umem_tree; 1401 /* 1402 * Protects .umem_rbroot and tree, as well as odp_mrs_count and 1403 * mmu notifiers registration. 1404 */ 1405 struct rw_semaphore umem_rwsem; 1406 void (*invalidate_range)(struct ib_umem *umem, 1407 unsigned long start, unsigned long end); 1408 1409 struct mmu_notifier mn; 1410 atomic_t notifier_count; 1411 /* A list of umems that don't have private mmu notifier counters yet. */ 1412 struct list_head no_private_counters; 1413 int odp_mrs_count; 1414 #endif 1415 }; 1416 1417 struct ib_uobject { 1418 u64 user_handle; /* handle given to us by userspace */ 1419 struct ib_ucontext *context; /* associated user context */ 1420 void *object; /* containing object */ 1421 struct list_head list; /* link to context's list */ 1422 int id; /* index into kernel idr */ 1423 struct kref ref; 1424 struct rw_semaphore mutex; /* protects .live */ 1425 struct rcu_head rcu; /* kfree_rcu() overhead */ 1426 int live; 1427 }; 1428 1429 struct ib_udata { 1430 const void __user *inbuf; 1431 void __user *outbuf; 1432 size_t inlen; 1433 size_t outlen; 1434 }; 1435 1436 struct ib_pd { 1437 u32 local_dma_lkey; 1438 u32 flags; 1439 struct ib_device *device; 1440 struct ib_uobject *uobject; 1441 atomic_t usecnt; /* count all resources */ 1442 1443 u32 unsafe_global_rkey; 1444 1445 /* 1446 * Implementation details of the RDMA core, don't use in drivers: 1447 */ 1448 struct ib_mr *__internal_mr; 1449 }; 1450 1451 struct ib_xrcd { 1452 struct ib_device *device; 1453 atomic_t usecnt; /* count all exposed resources */ 1454 struct inode *inode; 1455 1456 struct mutex tgt_qp_mutex; 1457 struct list_head tgt_qp_list; 1458 }; 1459 1460 struct ib_ah { 1461 struct ib_device *device; 1462 struct ib_pd *pd; 1463 struct ib_uobject *uobject; 1464 }; 1465 1466 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); 1467 1468 enum ib_poll_context { 1469 IB_POLL_DIRECT, /* caller context, no hw completions */ 1470 IB_POLL_SOFTIRQ, /* poll from softirq context */ 1471 IB_POLL_WORKQUEUE, /* poll from workqueue */ 1472 }; 1473 1474 struct ib_cq { 1475 struct ib_device *device; 1476 struct ib_uobject *uobject; 1477 ib_comp_handler comp_handler; 1478 void (*event_handler)(struct ib_event *, void *); 1479 void *cq_context; 1480 int cqe; 1481 atomic_t usecnt; /* count number of work queues */ 1482 enum ib_poll_context poll_ctx; 1483 struct work_struct work; 1484 }; 1485 1486 struct ib_srq { 1487 struct ib_device *device; 1488 struct ib_pd *pd; 1489 struct ib_uobject *uobject; 1490 void (*event_handler)(struct ib_event *, void *); 1491 void *srq_context; 1492 enum ib_srq_type srq_type; 1493 atomic_t usecnt; 1494 1495 union { 1496 struct { 1497 struct ib_xrcd *xrcd; 1498 struct ib_cq *cq; 1499 u32 srq_num; 1500 } xrc; 1501 } ext; 1502 }; 1503 1504 enum ib_wq_type { 1505 IB_WQT_RQ 1506 }; 1507 1508 enum ib_wq_state { 1509 IB_WQS_RESET, 1510 IB_WQS_RDY, 1511 IB_WQS_ERR 1512 }; 1513 1514 struct ib_wq { 1515 struct ib_device *device; 1516 struct ib_uobject *uobject; 1517 void *wq_context; 1518 void (*event_handler)(struct ib_event *, void *); 1519 struct ib_pd *pd; 1520 struct ib_cq *cq; 1521 u32 wq_num; 1522 enum ib_wq_state state; 1523 enum ib_wq_type wq_type; 1524 atomic_t usecnt; 1525 }; 1526 1527 struct ib_wq_init_attr { 1528 void *wq_context; 1529 enum ib_wq_type wq_type; 1530 u32 max_wr; 1531 u32 max_sge; 1532 struct ib_cq *cq; 1533 void (*event_handler)(struct ib_event *, void *); 1534 }; 1535 1536 enum ib_wq_attr_mask { 1537 IB_WQ_STATE = 1 << 0, 1538 IB_WQ_CUR_STATE = 1 << 1, 1539 }; 1540 1541 struct ib_wq_attr { 1542 enum ib_wq_state wq_state; 1543 enum ib_wq_state curr_wq_state; 1544 }; 1545 1546 struct ib_rwq_ind_table { 1547 struct ib_device *device; 1548 struct ib_uobject *uobject; 1549 atomic_t usecnt; 1550 u32 ind_tbl_num; 1551 u32 log_ind_tbl_size; 1552 struct ib_wq **ind_tbl; 1553 }; 1554 1555 struct ib_rwq_ind_table_init_attr { 1556 u32 log_ind_tbl_size; 1557 /* Each entry is a pointer to Receive Work Queue */ 1558 struct ib_wq **ind_tbl; 1559 }; 1560 1561 /* 1562 * @max_write_sge: Maximum SGE elements per RDMA WRITE request. 1563 * @max_read_sge: Maximum SGE elements per RDMA READ request. 1564 */ 1565 struct ib_qp { 1566 struct ib_device *device; 1567 struct ib_pd *pd; 1568 struct ib_cq *send_cq; 1569 struct ib_cq *recv_cq; 1570 spinlock_t mr_lock; 1571 struct ib_srq *srq; 1572 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 1573 struct list_head xrcd_list; 1574 1575 /* count times opened, mcast attaches, flow attaches */ 1576 atomic_t usecnt; 1577 struct list_head open_list; 1578 struct ib_qp *real_qp; 1579 struct ib_uobject *uobject; 1580 void (*event_handler)(struct ib_event *, void *); 1581 void *qp_context; 1582 u32 qp_num; 1583 u32 max_write_sge; 1584 u32 max_read_sge; 1585 enum ib_qp_type qp_type; 1586 struct ib_rwq_ind_table *rwq_ind_tbl; 1587 }; 1588 1589 struct ib_mr { 1590 struct ib_device *device; 1591 struct ib_pd *pd; 1592 u32 lkey; 1593 u32 rkey; 1594 u64 iova; 1595 u64 length; 1596 unsigned int page_size; 1597 bool need_inval; 1598 union { 1599 struct ib_uobject *uobject; /* user */ 1600 struct list_head qp_entry; /* FR */ 1601 }; 1602 }; 1603 1604 struct ib_mw { 1605 struct ib_device *device; 1606 struct ib_pd *pd; 1607 struct ib_uobject *uobject; 1608 u32 rkey; 1609 enum ib_mw_type type; 1610 }; 1611 1612 struct ib_fmr { 1613 struct ib_device *device; 1614 struct ib_pd *pd; 1615 struct list_head list; 1616 u32 lkey; 1617 u32 rkey; 1618 }; 1619 1620 /* Supported steering options */ 1621 enum ib_flow_attr_type { 1622 /* steering according to rule specifications */ 1623 IB_FLOW_ATTR_NORMAL = 0x0, 1624 /* default unicast and multicast rule - 1625 * receive all Eth traffic which isn't steered to any QP 1626 */ 1627 IB_FLOW_ATTR_ALL_DEFAULT = 0x1, 1628 /* default multicast rule - 1629 * receive all Eth multicast traffic which isn't steered to any QP 1630 */ 1631 IB_FLOW_ATTR_MC_DEFAULT = 0x2, 1632 /* sniffer rule - receive all port traffic */ 1633 IB_FLOW_ATTR_SNIFFER = 0x3 1634 }; 1635 1636 /* Supported steering header types */ 1637 enum ib_flow_spec_type { 1638 /* L2 headers*/ 1639 IB_FLOW_SPEC_ETH = 0x20, 1640 IB_FLOW_SPEC_IB = 0x22, 1641 /* L3 header*/ 1642 IB_FLOW_SPEC_IPV4 = 0x30, 1643 IB_FLOW_SPEC_IPV6 = 0x31, 1644 /* L4 headers*/ 1645 IB_FLOW_SPEC_TCP = 0x40, 1646 IB_FLOW_SPEC_UDP = 0x41 1647 }; 1648 #define IB_FLOW_SPEC_LAYER_MASK 0xF0 1649 #define IB_FLOW_SPEC_SUPPORT_LAYERS 4 1650 1651 /* Flow steering rule priority is set according to it's domain. 1652 * Lower domain value means higher priority. 1653 */ 1654 enum ib_flow_domain { 1655 IB_FLOW_DOMAIN_USER, 1656 IB_FLOW_DOMAIN_ETHTOOL, 1657 IB_FLOW_DOMAIN_RFS, 1658 IB_FLOW_DOMAIN_NIC, 1659 IB_FLOW_DOMAIN_NUM /* Must be last */ 1660 }; 1661 1662 enum ib_flow_flags { 1663 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */ 1664 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2 /* Must be last */ 1665 }; 1666 1667 struct ib_flow_eth_filter { 1668 u8 dst_mac[6]; 1669 u8 src_mac[6]; 1670 __be16 ether_type; 1671 __be16 vlan_tag; 1672 /* Must be last */ 1673 u8 real_sz[0]; 1674 }; 1675 1676 struct ib_flow_spec_eth { 1677 enum ib_flow_spec_type type; 1678 u16 size; 1679 struct ib_flow_eth_filter val; 1680 struct ib_flow_eth_filter mask; 1681 }; 1682 1683 struct ib_flow_ib_filter { 1684 __be16 dlid; 1685 __u8 sl; 1686 /* Must be last */ 1687 u8 real_sz[0]; 1688 }; 1689 1690 struct ib_flow_spec_ib { 1691 enum ib_flow_spec_type type; 1692 u16 size; 1693 struct ib_flow_ib_filter val; 1694 struct ib_flow_ib_filter mask; 1695 }; 1696 1697 /* IPv4 header flags */ 1698 enum ib_ipv4_flags { 1699 IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */ 1700 IB_IPV4_MORE_FRAG = 0X4 /* For All fragmented packets except the 1701 last have this flag set */ 1702 }; 1703 1704 struct ib_flow_ipv4_filter { 1705 __be32 src_ip; 1706 __be32 dst_ip; 1707 u8 proto; 1708 u8 tos; 1709 u8 ttl; 1710 u8 flags; 1711 /* Must be last */ 1712 u8 real_sz[0]; 1713 }; 1714 1715 struct ib_flow_spec_ipv4 { 1716 enum ib_flow_spec_type type; 1717 u16 size; 1718 struct ib_flow_ipv4_filter val; 1719 struct ib_flow_ipv4_filter mask; 1720 }; 1721 1722 struct ib_flow_ipv6_filter { 1723 u8 src_ip[16]; 1724 u8 dst_ip[16]; 1725 __be32 flow_label; 1726 u8 next_hdr; 1727 u8 traffic_class; 1728 u8 hop_limit; 1729 /* Must be last */ 1730 u8 real_sz[0]; 1731 }; 1732 1733 struct ib_flow_spec_ipv6 { 1734 enum ib_flow_spec_type type; 1735 u16 size; 1736 struct ib_flow_ipv6_filter val; 1737 struct ib_flow_ipv6_filter mask; 1738 }; 1739 1740 struct ib_flow_tcp_udp_filter { 1741 __be16 dst_port; 1742 __be16 src_port; 1743 /* Must be last */ 1744 u8 real_sz[0]; 1745 }; 1746 1747 struct ib_flow_spec_tcp_udp { 1748 enum ib_flow_spec_type type; 1749 u16 size; 1750 struct ib_flow_tcp_udp_filter val; 1751 struct ib_flow_tcp_udp_filter mask; 1752 }; 1753 1754 union ib_flow_spec { 1755 struct { 1756 enum ib_flow_spec_type type; 1757 u16 size; 1758 }; 1759 struct ib_flow_spec_eth eth; 1760 struct ib_flow_spec_ib ib; 1761 struct ib_flow_spec_ipv4 ipv4; 1762 struct ib_flow_spec_tcp_udp tcp_udp; 1763 struct ib_flow_spec_ipv6 ipv6; 1764 }; 1765 1766 struct ib_flow_attr { 1767 enum ib_flow_attr_type type; 1768 u16 size; 1769 u16 priority; 1770 u32 flags; 1771 u8 num_of_specs; 1772 u8 port; 1773 /* Following are the optional layers according to user request 1774 * struct ib_flow_spec_xxx 1775 * struct ib_flow_spec_yyy 1776 */ 1777 }; 1778 1779 struct ib_flow { 1780 struct ib_qp *qp; 1781 struct ib_uobject *uobject; 1782 }; 1783 1784 struct ib_mad_hdr; 1785 struct ib_grh; 1786 1787 enum ib_process_mad_flags { 1788 IB_MAD_IGNORE_MKEY = 1, 1789 IB_MAD_IGNORE_BKEY = 2, 1790 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY 1791 }; 1792 1793 enum ib_mad_result { 1794 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ 1795 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */ 1796 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ 1797 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ 1798 }; 1799 1800 #define IB_DEVICE_NAME_MAX 64 1801 1802 struct ib_cache { 1803 rwlock_t lock; 1804 struct ib_event_handler event_handler; 1805 struct ib_pkey_cache **pkey_cache; 1806 struct ib_gid_table **gid_cache; 1807 u8 *lmc_cache; 1808 }; 1809 1810 struct ib_dma_mapping_ops { 1811 int (*mapping_error)(struct ib_device *dev, 1812 u64 dma_addr); 1813 u64 (*map_single)(struct ib_device *dev, 1814 void *ptr, size_t size, 1815 enum dma_data_direction direction); 1816 void (*unmap_single)(struct ib_device *dev, 1817 u64 addr, size_t size, 1818 enum dma_data_direction direction); 1819 u64 (*map_page)(struct ib_device *dev, 1820 struct page *page, unsigned long offset, 1821 size_t size, 1822 enum dma_data_direction direction); 1823 void (*unmap_page)(struct ib_device *dev, 1824 u64 addr, size_t size, 1825 enum dma_data_direction direction); 1826 int (*map_sg)(struct ib_device *dev, 1827 struct scatterlist *sg, int nents, 1828 enum dma_data_direction direction); 1829 void (*unmap_sg)(struct ib_device *dev, 1830 struct scatterlist *sg, int nents, 1831 enum dma_data_direction direction); 1832 int (*map_sg_attrs)(struct ib_device *dev, 1833 struct scatterlist *sg, int nents, 1834 enum dma_data_direction direction, 1835 struct dma_attrs *attrs); 1836 void (*unmap_sg_attrs)(struct ib_device *dev, 1837 struct scatterlist *sg, int nents, 1838 enum dma_data_direction direction, 1839 struct dma_attrs *attrs); 1840 void (*sync_single_for_cpu)(struct ib_device *dev, 1841 u64 dma_handle, 1842 size_t size, 1843 enum dma_data_direction dir); 1844 void (*sync_single_for_device)(struct ib_device *dev, 1845 u64 dma_handle, 1846 size_t size, 1847 enum dma_data_direction dir); 1848 void *(*alloc_coherent)(struct ib_device *dev, 1849 size_t size, 1850 u64 *dma_handle, 1851 gfp_t flag); 1852 void (*free_coherent)(struct ib_device *dev, 1853 size_t size, void *cpu_addr, 1854 u64 dma_handle); 1855 }; 1856 1857 struct iw_cm_verbs; 1858 1859 struct ib_port_immutable { 1860 int pkey_tbl_len; 1861 int gid_tbl_len; 1862 u32 core_cap_flags; 1863 u32 max_mad_size; 1864 }; 1865 1866 struct ib_device { 1867 struct device *dma_device; 1868 1869 char name[IB_DEVICE_NAME_MAX]; 1870 1871 struct list_head event_handler_list; 1872 spinlock_t event_handler_lock; 1873 1874 spinlock_t client_data_lock; 1875 struct list_head core_list; 1876 /* Access to the client_data_list is protected by the client_data_lock 1877 * spinlock and the lists_rwsem read-write semaphore */ 1878 struct list_head client_data_list; 1879 1880 struct ib_cache cache; 1881 /** 1882 * port_immutable is indexed by port number 1883 */ 1884 struct ib_port_immutable *port_immutable; 1885 1886 int num_comp_vectors; 1887 1888 struct iw_cm_verbs *iwcm; 1889 1890 /** 1891 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the 1892 * driver initialized data. The struct is kfree()'ed by the sysfs 1893 * core when the device is removed. A lifespan of -1 in the return 1894 * struct tells the core to set a default lifespan. 1895 */ 1896 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device, 1897 u8 port_num); 1898 /** 1899 * get_hw_stats - Fill in the counter value(s) in the stats struct. 1900 * @index - The index in the value array we wish to have updated, or 1901 * num_counters if we want all stats updated 1902 * Return codes - 1903 * < 0 - Error, no counters updated 1904 * index - Updated the single counter pointed to by index 1905 * num_counters - Updated all counters (will reset the timestamp 1906 * and prevent further calls for lifespan milliseconds) 1907 * Drivers are allowed to update all counters in leiu of just the 1908 * one given in index at their option 1909 */ 1910 int (*get_hw_stats)(struct ib_device *device, 1911 struct rdma_hw_stats *stats, 1912 u8 port, int index); 1913 int (*query_device)(struct ib_device *device, 1914 struct ib_device_attr *device_attr, 1915 struct ib_udata *udata); 1916 int (*query_port)(struct ib_device *device, 1917 u8 port_num, 1918 struct ib_port_attr *port_attr); 1919 enum rdma_link_layer (*get_link_layer)(struct ib_device *device, 1920 u8 port_num); 1921 /* When calling get_netdev, the HW vendor's driver should return the 1922 * net device of device @device at port @port_num or NULL if such 1923 * a net device doesn't exist. The vendor driver should call dev_hold 1924 * on this net device. The HW vendor's device driver must guarantee 1925 * that this function returns NULL before the net device reaches 1926 * NETDEV_UNREGISTER_FINAL state. 1927 */ 1928 struct ifnet *(*get_netdev)(struct ib_device *device, 1929 u8 port_num); 1930 int (*query_gid)(struct ib_device *device, 1931 u8 port_num, int index, 1932 union ib_gid *gid); 1933 /* When calling add_gid, the HW vendor's driver should 1934 * add the gid of device @device at gid index @index of 1935 * port @port_num to be @gid. Meta-info of that gid (for example, 1936 * the network device related to this gid is available 1937 * at @attr. @context allows the HW vendor driver to store extra 1938 * information together with a GID entry. The HW vendor may allocate 1939 * memory to contain this information and store it in @context when a 1940 * new GID entry is written to. Params are consistent until the next 1941 * call of add_gid or delete_gid. The function should return 0 on 1942 * success or error otherwise. The function could be called 1943 * concurrently for different ports. This function is only called 1944 * when roce_gid_table is used. 1945 */ 1946 int (*add_gid)(struct ib_device *device, 1947 u8 port_num, 1948 unsigned int index, 1949 const union ib_gid *gid, 1950 const struct ib_gid_attr *attr, 1951 void **context); 1952 /* When calling del_gid, the HW vendor's driver should delete the 1953 * gid of device @device at gid index @index of port @port_num. 1954 * Upon the deletion of a GID entry, the HW vendor must free any 1955 * allocated memory. The caller will clear @context afterwards. 1956 * This function is only called when roce_gid_table is used. 1957 */ 1958 int (*del_gid)(struct ib_device *device, 1959 u8 port_num, 1960 unsigned int index, 1961 void **context); 1962 int (*query_pkey)(struct ib_device *device, 1963 u8 port_num, u16 index, u16 *pkey); 1964 int (*modify_device)(struct ib_device *device, 1965 int device_modify_mask, 1966 struct ib_device_modify *device_modify); 1967 int (*modify_port)(struct ib_device *device, 1968 u8 port_num, int port_modify_mask, 1969 struct ib_port_modify *port_modify); 1970 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device, 1971 struct ib_udata *udata); 1972 int (*dealloc_ucontext)(struct ib_ucontext *context); 1973 int (*mmap)(struct ib_ucontext *context, 1974 struct vm_area_struct *vma); 1975 struct ib_pd * (*alloc_pd)(struct ib_device *device, 1976 struct ib_ucontext *context, 1977 struct ib_udata *udata); 1978 int (*dealloc_pd)(struct ib_pd *pd); 1979 struct ib_ah * (*create_ah)(struct ib_pd *pd, 1980 struct ib_ah_attr *ah_attr, 1981 struct ib_udata *udata); 1982 int (*modify_ah)(struct ib_ah *ah, 1983 struct ib_ah_attr *ah_attr); 1984 int (*query_ah)(struct ib_ah *ah, 1985 struct ib_ah_attr *ah_attr); 1986 int (*destroy_ah)(struct ib_ah *ah); 1987 struct ib_srq * (*create_srq)(struct ib_pd *pd, 1988 struct ib_srq_init_attr *srq_init_attr, 1989 struct ib_udata *udata); 1990 int (*modify_srq)(struct ib_srq *srq, 1991 struct ib_srq_attr *srq_attr, 1992 enum ib_srq_attr_mask srq_attr_mask, 1993 struct ib_udata *udata); 1994 int (*query_srq)(struct ib_srq *srq, 1995 struct ib_srq_attr *srq_attr); 1996 int (*destroy_srq)(struct ib_srq *srq); 1997 int (*post_srq_recv)(struct ib_srq *srq, 1998 const struct ib_recv_wr *recv_wr, 1999 const struct ib_recv_wr **bad_recv_wr); 2000 struct ib_qp * (*create_qp)(struct ib_pd *pd, 2001 struct ib_qp_init_attr *qp_init_attr, 2002 struct ib_udata *udata); 2003 int (*modify_qp)(struct ib_qp *qp, 2004 struct ib_qp_attr *qp_attr, 2005 int qp_attr_mask, 2006 struct ib_udata *udata); 2007 int (*query_qp)(struct ib_qp *qp, 2008 struct ib_qp_attr *qp_attr, 2009 int qp_attr_mask, 2010 struct ib_qp_init_attr *qp_init_attr); 2011 int (*destroy_qp)(struct ib_qp *qp); 2012 int (*post_send)(struct ib_qp *qp, 2013 const struct ib_send_wr *send_wr, 2014 const struct ib_send_wr **bad_send_wr); 2015 int (*post_recv)(struct ib_qp *qp, 2016 const struct ib_recv_wr *recv_wr, 2017 const struct ib_recv_wr **bad_recv_wr); 2018 struct ib_cq * (*create_cq)(struct ib_device *device, 2019 const struct ib_cq_init_attr *attr, 2020 struct ib_ucontext *context, 2021 struct ib_udata *udata); 2022 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, 2023 u16 cq_period); 2024 int (*destroy_cq)(struct ib_cq *cq); 2025 int (*resize_cq)(struct ib_cq *cq, int cqe, 2026 struct ib_udata *udata); 2027 int (*poll_cq)(struct ib_cq *cq, int num_entries, 2028 struct ib_wc *wc); 2029 int (*peek_cq)(struct ib_cq *cq, int wc_cnt); 2030 int (*req_notify_cq)(struct ib_cq *cq, 2031 enum ib_cq_notify_flags flags); 2032 int (*req_ncomp_notif)(struct ib_cq *cq, 2033 int wc_cnt); 2034 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, 2035 int mr_access_flags); 2036 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd, 2037 u64 start, u64 length, 2038 u64 virt_addr, 2039 int mr_access_flags, 2040 struct ib_udata *udata); 2041 int (*rereg_user_mr)(struct ib_mr *mr, 2042 int flags, 2043 u64 start, u64 length, 2044 u64 virt_addr, 2045 int mr_access_flags, 2046 struct ib_pd *pd, 2047 struct ib_udata *udata); 2048 int (*dereg_mr)(struct ib_mr *mr); 2049 struct ib_mr * (*alloc_mr)(struct ib_pd *pd, 2050 enum ib_mr_type mr_type, 2051 u32 max_num_sg); 2052 int (*map_mr_sg)(struct ib_mr *mr, 2053 struct scatterlist *sg, 2054 int sg_nents, 2055 unsigned int *sg_offset); 2056 struct ib_mw * (*alloc_mw)(struct ib_pd *pd, 2057 enum ib_mw_type type, 2058 struct ib_udata *udata); 2059 int (*dealloc_mw)(struct ib_mw *mw); 2060 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd, 2061 int mr_access_flags, 2062 struct ib_fmr_attr *fmr_attr); 2063 int (*map_phys_fmr)(struct ib_fmr *fmr, 2064 u64 *page_list, int list_len, 2065 u64 iova); 2066 int (*unmap_fmr)(struct list_head *fmr_list); 2067 int (*dealloc_fmr)(struct ib_fmr *fmr); 2068 int (*attach_mcast)(struct ib_qp *qp, 2069 union ib_gid *gid, 2070 u16 lid); 2071 int (*detach_mcast)(struct ib_qp *qp, 2072 union ib_gid *gid, 2073 u16 lid); 2074 int (*process_mad)(struct ib_device *device, 2075 int process_mad_flags, 2076 u8 port_num, 2077 const struct ib_wc *in_wc, 2078 const struct ib_grh *in_grh, 2079 const struct ib_mad_hdr *in_mad, 2080 size_t in_mad_size, 2081 struct ib_mad_hdr *out_mad, 2082 size_t *out_mad_size, 2083 u16 *out_mad_pkey_index); 2084 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device, 2085 struct ib_ucontext *ucontext, 2086 struct ib_udata *udata); 2087 int (*dealloc_xrcd)(struct ib_xrcd *xrcd); 2088 struct ib_flow * (*create_flow)(struct ib_qp *qp, 2089 struct ib_flow_attr 2090 *flow_attr, 2091 int domain); 2092 int (*destroy_flow)(struct ib_flow *flow_id); 2093 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, 2094 struct ib_mr_status *mr_status); 2095 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); 2096 void (*drain_rq)(struct ib_qp *qp); 2097 void (*drain_sq)(struct ib_qp *qp); 2098 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port, 2099 int state); 2100 int (*get_vf_config)(struct ib_device *device, int vf, u8 port, 2101 struct ifla_vf_info *ivf); 2102 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port, 2103 struct ifla_vf_stats *stats); 2104 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid, 2105 int type); 2106 struct ib_wq * (*create_wq)(struct ib_pd *pd, 2107 struct ib_wq_init_attr *init_attr, 2108 struct ib_udata *udata); 2109 int (*destroy_wq)(struct ib_wq *wq); 2110 int (*modify_wq)(struct ib_wq *wq, 2111 struct ib_wq_attr *attr, 2112 u32 wq_attr_mask, 2113 struct ib_udata *udata); 2114 struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *device, 2115 struct ib_rwq_ind_table_init_attr *init_attr, 2116 struct ib_udata *udata); 2117 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); 2118 struct ib_dma_mapping_ops *dma_ops; 2119 2120 struct module *owner; 2121 struct device dev; 2122 struct kobject *ports_parent; 2123 struct list_head port_list; 2124 2125 enum { 2126 IB_DEV_UNINITIALIZED, 2127 IB_DEV_REGISTERED, 2128 IB_DEV_UNREGISTERED 2129 } reg_state; 2130 2131 int uverbs_abi_ver; 2132 u64 uverbs_cmd_mask; 2133 u64 uverbs_ex_cmd_mask; 2134 2135 char node_desc[IB_DEVICE_NODE_DESC_MAX]; 2136 __be64 node_guid; 2137 u32 local_dma_lkey; 2138 u16 is_switch:1; 2139 u8 node_type; 2140 u8 phys_port_cnt; 2141 struct ib_device_attr attrs; 2142 struct attribute_group *hw_stats_ag; 2143 struct rdma_hw_stats *hw_stats; 2144 2145 /** 2146 * The following mandatory functions are used only at device 2147 * registration. Keep functions such as these at the end of this 2148 * structure to avoid cache line misses when accessing struct ib_device 2149 * in fast paths. 2150 */ 2151 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *); 2152 void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len); 2153 }; 2154 2155 struct ib_client { 2156 char *name; 2157 void (*add) (struct ib_device *); 2158 void (*remove)(struct ib_device *, void *client_data); 2159 2160 /* Returns the net_dev belonging to this ib_client and matching the 2161 * given parameters. 2162 * @dev: An RDMA device that the net_dev use for communication. 2163 * @port: A physical port number on the RDMA device. 2164 * @pkey: P_Key that the net_dev uses if applicable. 2165 * @gid: A GID that the net_dev uses to communicate. 2166 * @addr: An IP address the net_dev is configured with. 2167 * @client_data: The device's client data set by ib_set_client_data(). 2168 * 2169 * An ib_client that implements a net_dev on top of RDMA devices 2170 * (such as IP over IB) should implement this callback, allowing the 2171 * rdma_cm module to find the right net_dev for a given request. 2172 * 2173 * The caller is responsible for calling dev_put on the returned 2174 * netdev. */ 2175 struct ifnet *(*get_net_dev_by_params)( 2176 struct ib_device *dev, 2177 u8 port, 2178 u16 pkey, 2179 const union ib_gid *gid, 2180 const struct sockaddr *addr, 2181 void *client_data); 2182 struct list_head list; 2183 }; 2184 2185 struct ib_device *ib_alloc_device(size_t size); 2186 void ib_dealloc_device(struct ib_device *device); 2187 2188 void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len); 2189 2190 int ib_register_device(struct ib_device *device, 2191 int (*port_callback)(struct ib_device *, 2192 u8, struct kobject *)); 2193 void ib_unregister_device(struct ib_device *device); 2194 2195 int ib_register_client (struct ib_client *client); 2196 void ib_unregister_client(struct ib_client *client); 2197 2198 void *ib_get_client_data(struct ib_device *device, struct ib_client *client); 2199 void ib_set_client_data(struct ib_device *device, struct ib_client *client, 2200 void *data); 2201 2202 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) 2203 { 2204 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; 2205 } 2206 2207 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) 2208 { 2209 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; 2210 } 2211 2212 static inline bool ib_is_udata_cleared(struct ib_udata *udata, 2213 size_t offset, 2214 size_t len) 2215 { 2216 const void __user *p = (const char __user *)udata->inbuf + offset; 2217 bool ret; 2218 u8 *buf; 2219 2220 if (len > USHRT_MAX) 2221 return false; 2222 2223 buf = memdup_user(p, len); 2224 if (IS_ERR(buf)) 2225 return false; 2226 2227 ret = !memchr_inv(buf, 0, len); 2228 kfree(buf); 2229 return ret; 2230 } 2231 2232 /** 2233 * ib_is_destroy_retryable - Check whether the uobject destruction 2234 * is retryable. 2235 * @ret: The initial destruction return code 2236 * @why: remove reason 2237 * @uobj: The uobject that is destroyed 2238 * 2239 * This function is a helper function that IB layer and low-level drivers 2240 * can use to consider whether the destruction of the given uobject is 2241 * retry-able. 2242 * It checks the original return code, if it wasn't success the destruction 2243 * is retryable according to the ucontext state (i.e. cleanup_retryable) and 2244 * the remove reason. (i.e. why). 2245 * Must be called with the object locked for destroy. 2246 */ 2247 static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why, 2248 struct ib_uobject *uobj) 2249 { 2250 return ret && (why == RDMA_REMOVE_DESTROY || 2251 uobj->context->cleanup_retryable); 2252 } 2253 2254 /** 2255 * ib_destroy_usecnt - Called during destruction to check the usecnt 2256 * @usecnt: The usecnt atomic 2257 * @why: remove reason 2258 * @uobj: The uobject that is destroyed 2259 * 2260 * Non-zero usecnts will block destruction unless destruction was triggered by 2261 * a ucontext cleanup. 2262 */ 2263 static inline int ib_destroy_usecnt(atomic_t *usecnt, 2264 enum rdma_remove_reason why, 2265 struct ib_uobject *uobj) 2266 { 2267 if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj)) 2268 return -EBUSY; 2269 return 0; 2270 } 2271 2272 /** 2273 * ib_modify_qp_is_ok - Check that the supplied attribute mask 2274 * contains all required attributes and no attributes not allowed for 2275 * the given QP state transition. 2276 * @cur_state: Current QP state 2277 * @next_state: Next QP state 2278 * @type: QP type 2279 * @mask: Mask of supplied QP attributes 2280 * 2281 * This function is a helper function that a low-level driver's 2282 * modify_qp method can use to validate the consumer's input. It 2283 * checks that cur_state and next_state are valid QP states, that a 2284 * transition from cur_state to next_state is allowed by the IB spec, 2285 * and that the attribute mask supplied is allowed for the transition. 2286 */ 2287 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 2288 enum ib_qp_type type, enum ib_qp_attr_mask mask); 2289 2290 int ib_register_event_handler (struct ib_event_handler *event_handler); 2291 int ib_unregister_event_handler(struct ib_event_handler *event_handler); 2292 void ib_dispatch_event(struct ib_event *event); 2293 2294 int ib_query_port(struct ib_device *device, 2295 u8 port_num, struct ib_port_attr *port_attr); 2296 2297 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, 2298 u8 port_num); 2299 2300 /** 2301 * rdma_cap_ib_switch - Check if the device is IB switch 2302 * @device: Device to check 2303 * 2304 * Device driver is responsible for setting is_switch bit on 2305 * in ib_device structure at init time. 2306 * 2307 * Return: true if the device is IB switch. 2308 */ 2309 static inline bool rdma_cap_ib_switch(const struct ib_device *device) 2310 { 2311 return device->is_switch; 2312 } 2313 2314 /** 2315 * rdma_start_port - Return the first valid port number for the device 2316 * specified 2317 * 2318 * @device: Device to be checked 2319 * 2320 * Return start port number 2321 */ 2322 static inline u8 rdma_start_port(const struct ib_device *device) 2323 { 2324 return rdma_cap_ib_switch(device) ? 0 : 1; 2325 } 2326 2327 /** 2328 * rdma_end_port - Return the last valid port number for the device 2329 * specified 2330 * 2331 * @device: Device to be checked 2332 * 2333 * Return last port number 2334 */ 2335 static inline u8 rdma_end_port(const struct ib_device *device) 2336 { 2337 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt; 2338 } 2339 2340 static inline int rdma_is_port_valid(const struct ib_device *device, 2341 unsigned int port) 2342 { 2343 return (port >= rdma_start_port(device) && 2344 port <= rdma_end_port(device)); 2345 } 2346 2347 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) 2348 { 2349 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB; 2350 } 2351 2352 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num) 2353 { 2354 return device->port_immutable[port_num].core_cap_flags & 2355 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP); 2356 } 2357 2358 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num) 2359 { 2360 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; 2361 } 2362 2363 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num) 2364 { 2365 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE; 2366 } 2367 2368 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num) 2369 { 2370 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP; 2371 } 2372 2373 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num) 2374 { 2375 return rdma_protocol_ib(device, port_num) || 2376 rdma_protocol_roce(device, port_num); 2377 } 2378 2379 /** 2380 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband 2381 * Management Datagrams. 2382 * @device: Device to check 2383 * @port_num: Port number to check 2384 * 2385 * Management Datagrams (MAD) are a required part of the InfiniBand 2386 * specification and are supported on all InfiniBand devices. A slightly 2387 * extended version are also supported on OPA interfaces. 2388 * 2389 * Return: true if the port supports sending/receiving of MAD packets. 2390 */ 2391 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num) 2392 { 2393 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD; 2394 } 2395 2396 /** 2397 * rdma_cap_opa_mad - Check if the port of device provides support for OPA 2398 * Management Datagrams. 2399 * @device: Device to check 2400 * @port_num: Port number to check 2401 * 2402 * Intel OmniPath devices extend and/or replace the InfiniBand Management 2403 * datagrams with their own versions. These OPA MADs share many but not all of 2404 * the characteristics of InfiniBand MADs. 2405 * 2406 * OPA MADs differ in the following ways: 2407 * 2408 * 1) MADs are variable size up to 2K 2409 * IBTA defined MADs remain fixed at 256 bytes 2410 * 2) OPA SMPs must carry valid PKeys 2411 * 3) OPA SMP packets are a different format 2412 * 2413 * Return: true if the port supports OPA MAD packet formats. 2414 */ 2415 static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num) 2416 { 2417 return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD) 2418 == RDMA_CORE_CAP_OPA_MAD; 2419 } 2420 2421 /** 2422 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband 2423 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI). 2424 * @device: Device to check 2425 * @port_num: Port number to check 2426 * 2427 * Each InfiniBand node is required to provide a Subnet Management Agent 2428 * that the subnet manager can access. Prior to the fabric being fully 2429 * configured by the subnet manager, the SMA is accessed via a well known 2430 * interface called the Subnet Management Interface (SMI). This interface 2431 * uses directed route packets to communicate with the SM to get around the 2432 * chicken and egg problem of the SM needing to know what's on the fabric 2433 * in order to configure the fabric, and needing to configure the fabric in 2434 * order to send packets to the devices on the fabric. These directed 2435 * route packets do not need the fabric fully configured in order to reach 2436 * their destination. The SMI is the only method allowed to send 2437 * directed route packets on an InfiniBand fabric. 2438 * 2439 * Return: true if the port provides an SMI. 2440 */ 2441 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num) 2442 { 2443 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI; 2444 } 2445 2446 /** 2447 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband 2448 * Communication Manager. 2449 * @device: Device to check 2450 * @port_num: Port number to check 2451 * 2452 * The InfiniBand Communication Manager is one of many pre-defined General 2453 * Service Agents (GSA) that are accessed via the General Service 2454 * Interface (GSI). It's role is to facilitate establishment of connections 2455 * between nodes as well as other management related tasks for established 2456 * connections. 2457 * 2458 * Return: true if the port supports an IB CM (this does not guarantee that 2459 * a CM is actually running however). 2460 */ 2461 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num) 2462 { 2463 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM; 2464 } 2465 2466 /** 2467 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP 2468 * Communication Manager. 2469 * @device: Device to check 2470 * @port_num: Port number to check 2471 * 2472 * Similar to above, but specific to iWARP connections which have a different 2473 * managment protocol than InfiniBand. 2474 * 2475 * Return: true if the port supports an iWARP CM (this does not guarantee that 2476 * a CM is actually running however). 2477 */ 2478 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num) 2479 { 2480 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM; 2481 } 2482 2483 /** 2484 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband 2485 * Subnet Administration. 2486 * @device: Device to check 2487 * @port_num: Port number to check 2488 * 2489 * An InfiniBand Subnet Administration (SA) service is a pre-defined General 2490 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand 2491 * fabrics, devices should resolve routes to other hosts by contacting the 2492 * SA to query the proper route. 2493 * 2494 * Return: true if the port should act as a client to the fabric Subnet 2495 * Administration interface. This does not imply that the SA service is 2496 * running locally. 2497 */ 2498 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num) 2499 { 2500 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA; 2501 } 2502 2503 /** 2504 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband 2505 * Multicast. 2506 * @device: Device to check 2507 * @port_num: Port number to check 2508 * 2509 * InfiniBand multicast registration is more complex than normal IPv4 or 2510 * IPv6 multicast registration. Each Host Channel Adapter must register 2511 * with the Subnet Manager when it wishes to join a multicast group. It 2512 * should do so only once regardless of how many queue pairs it subscribes 2513 * to this group. And it should leave the group only after all queue pairs 2514 * attached to the group have been detached. 2515 * 2516 * Return: true if the port must undertake the additional adminstrative 2517 * overhead of registering/unregistering with the SM and tracking of the 2518 * total number of queue pairs attached to the multicast group. 2519 */ 2520 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num) 2521 { 2522 return rdma_cap_ib_sa(device, port_num); 2523 } 2524 2525 /** 2526 * rdma_cap_af_ib - Check if the port of device has the capability 2527 * Native Infiniband Address. 2528 * @device: Device to check 2529 * @port_num: Port number to check 2530 * 2531 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default 2532 * GID. RoCE uses a different mechanism, but still generates a GID via 2533 * a prescribed mechanism and port specific data. 2534 * 2535 * Return: true if the port uses a GID address to identify devices on the 2536 * network. 2537 */ 2538 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num) 2539 { 2540 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB; 2541 } 2542 2543 /** 2544 * rdma_cap_eth_ah - Check if the port of device has the capability 2545 * Ethernet Address Handle. 2546 * @device: Device to check 2547 * @port_num: Port number to check 2548 * 2549 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique 2550 * to fabricate GIDs over Ethernet/IP specific addresses native to the 2551 * port. Normally, packet headers are generated by the sending host 2552 * adapter, but when sending connectionless datagrams, we must manually 2553 * inject the proper headers for the fabric we are communicating over. 2554 * 2555 * Return: true if we are running as a RoCE port and must force the 2556 * addition of a Global Route Header built from our Ethernet Address 2557 * Handle into our header list for connectionless packets. 2558 */ 2559 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num) 2560 { 2561 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH; 2562 } 2563 2564 /** 2565 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port. 2566 * 2567 * @device: Device 2568 * @port_num: Port number 2569 * 2570 * This MAD size includes the MAD headers and MAD payload. No other headers 2571 * are included. 2572 * 2573 * Return the max MAD size required by the Port. Will return 0 if the port 2574 * does not support MADs 2575 */ 2576 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num) 2577 { 2578 return device->port_immutable[port_num].max_mad_size; 2579 } 2580 2581 /** 2582 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table 2583 * @device: Device to check 2584 * @port_num: Port number to check 2585 * 2586 * RoCE GID table mechanism manages the various GIDs for a device. 2587 * 2588 * NOTE: if allocating the port's GID table has failed, this call will still 2589 * return true, but any RoCE GID table API will fail. 2590 * 2591 * Return: true if the port uses RoCE GID table mechanism in order to manage 2592 * its GIDs. 2593 */ 2594 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, 2595 u8 port_num) 2596 { 2597 return rdma_protocol_roce(device, port_num) && 2598 device->add_gid && device->del_gid; 2599 } 2600 2601 /* 2602 * Check if the device supports READ W/ INVALIDATE. 2603 */ 2604 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num) 2605 { 2606 /* 2607 * iWarp drivers must support READ W/ INVALIDATE. No other protocol 2608 * has support for it yet. 2609 */ 2610 return rdma_protocol_iwarp(dev, port_num); 2611 } 2612 2613 int ib_query_gid(struct ib_device *device, 2614 u8 port_num, int index, union ib_gid *gid, 2615 struct ib_gid_attr *attr); 2616 2617 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, 2618 int state); 2619 int ib_get_vf_config(struct ib_device *device, int vf, u8 port, 2620 struct ifla_vf_info *info); 2621 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, 2622 struct ifla_vf_stats *stats); 2623 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, 2624 int type); 2625 2626 int ib_query_pkey(struct ib_device *device, 2627 u8 port_num, u16 index, u16 *pkey); 2628 2629 int ib_modify_device(struct ib_device *device, 2630 int device_modify_mask, 2631 struct ib_device_modify *device_modify); 2632 2633 int ib_modify_port(struct ib_device *device, 2634 u8 port_num, int port_modify_mask, 2635 struct ib_port_modify *port_modify); 2636 2637 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 2638 enum ib_gid_type gid_type, struct ifnet *ndev, 2639 u8 *port_num, u16 *index); 2640 2641 int ib_find_pkey(struct ib_device *device, 2642 u8 port_num, u16 pkey, u16 *index); 2643 2644 enum ib_pd_flags { 2645 /* 2646 * Create a memory registration for all memory in the system and place 2647 * the rkey for it into pd->unsafe_global_rkey. This can be used by 2648 * ULPs to avoid the overhead of dynamic MRs. 2649 * 2650 * This flag is generally considered unsafe and must only be used in 2651 * extremly trusted environments. Every use of it will log a warning 2652 * in the kernel log. 2653 */ 2654 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01, 2655 }; 2656 2657 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, 2658 const char *caller); 2659 #define ib_alloc_pd(device, flags) \ 2660 __ib_alloc_pd((device), (flags), __func__) 2661 void ib_dealloc_pd(struct ib_pd *pd); 2662 2663 /** 2664 * ib_create_ah - Creates an address handle for the given address vector. 2665 * @pd: The protection domain associated with the address handle. 2666 * @ah_attr: The attributes of the address vector. 2667 * 2668 * The address handle is used to reference a local or global destination 2669 * in all UD QP post sends. 2670 */ 2671 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); 2672 2673 /** 2674 * ib_init_ah_from_wc - Initializes address handle attributes from a 2675 * work completion. 2676 * @device: Device on which the received message arrived. 2677 * @port_num: Port on which the received message arrived. 2678 * @wc: Work completion associated with the received message. 2679 * @grh: References the received global route header. This parameter is 2680 * ignored unless the work completion indicates that the GRH is valid. 2681 * @ah_attr: Returned attributes that can be used when creating an address 2682 * handle for replying to the message. 2683 */ 2684 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, 2685 const struct ib_wc *wc, const struct ib_grh *grh, 2686 struct ib_ah_attr *ah_attr); 2687 2688 /** 2689 * ib_create_ah_from_wc - Creates an address handle associated with the 2690 * sender of the specified work completion. 2691 * @pd: The protection domain associated with the address handle. 2692 * @wc: Work completion information associated with a received message. 2693 * @grh: References the received global route header. This parameter is 2694 * ignored unless the work completion indicates that the GRH is valid. 2695 * @port_num: The outbound port number to associate with the address. 2696 * 2697 * The address handle is used to reference a local or global destination 2698 * in all UD QP post sends. 2699 */ 2700 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, 2701 const struct ib_grh *grh, u8 port_num); 2702 2703 /** 2704 * ib_modify_ah - Modifies the address vector associated with an address 2705 * handle. 2706 * @ah: The address handle to modify. 2707 * @ah_attr: The new address vector attributes to associate with the 2708 * address handle. 2709 */ 2710 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); 2711 2712 /** 2713 * ib_query_ah - Queries the address vector associated with an address 2714 * handle. 2715 * @ah: The address handle to query. 2716 * @ah_attr: The address vector attributes associated with the address 2717 * handle. 2718 */ 2719 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); 2720 2721 /** 2722 * ib_destroy_ah - Destroys an address handle. 2723 * @ah: The address handle to destroy. 2724 */ 2725 int ib_destroy_ah(struct ib_ah *ah); 2726 2727 /** 2728 * ib_create_srq - Creates a SRQ associated with the specified protection 2729 * domain. 2730 * @pd: The protection domain associated with the SRQ. 2731 * @srq_init_attr: A list of initial attributes required to create the 2732 * SRQ. If SRQ creation succeeds, then the attributes are updated to 2733 * the actual capabilities of the created SRQ. 2734 * 2735 * srq_attr->max_wr and srq_attr->max_sge are read the determine the 2736 * requested size of the SRQ, and set to the actual values allocated 2737 * on return. If ib_create_srq() succeeds, then max_wr and max_sge 2738 * will always be at least as large as the requested values. 2739 */ 2740 struct ib_srq *ib_create_srq(struct ib_pd *pd, 2741 struct ib_srq_init_attr *srq_init_attr); 2742 2743 /** 2744 * ib_modify_srq - Modifies the attributes for the specified SRQ. 2745 * @srq: The SRQ to modify. 2746 * @srq_attr: On input, specifies the SRQ attributes to modify. On output, 2747 * the current values of selected SRQ attributes are returned. 2748 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ 2749 * are being modified. 2750 * 2751 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or 2752 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when 2753 * the number of receives queued drops below the limit. 2754 */ 2755 int ib_modify_srq(struct ib_srq *srq, 2756 struct ib_srq_attr *srq_attr, 2757 enum ib_srq_attr_mask srq_attr_mask); 2758 2759 /** 2760 * ib_query_srq - Returns the attribute list and current values for the 2761 * specified SRQ. 2762 * @srq: The SRQ to query. 2763 * @srq_attr: The attributes of the specified SRQ. 2764 */ 2765 int ib_query_srq(struct ib_srq *srq, 2766 struct ib_srq_attr *srq_attr); 2767 2768 /** 2769 * ib_destroy_srq - Destroys the specified SRQ. 2770 * @srq: The SRQ to destroy. 2771 */ 2772 int ib_destroy_srq(struct ib_srq *srq); 2773 2774 /** 2775 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. 2776 * @srq: The SRQ to post the work request on. 2777 * @recv_wr: A list of work requests to post on the receive queue. 2778 * @bad_recv_wr: On an immediate failure, this parameter will reference 2779 * the work request that failed to be posted on the QP. 2780 */ 2781 static inline int ib_post_srq_recv(struct ib_srq *srq, 2782 const struct ib_recv_wr *recv_wr, 2783 const struct ib_recv_wr **bad_recv_wr) 2784 { 2785 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr); 2786 } 2787 2788 /** 2789 * ib_create_qp - Creates a QP associated with the specified protection 2790 * domain. 2791 * @pd: The protection domain associated with the QP. 2792 * @qp_init_attr: A list of initial attributes required to create the 2793 * QP. If QP creation succeeds, then the attributes are updated to 2794 * the actual capabilities of the created QP. 2795 */ 2796 struct ib_qp *ib_create_qp(struct ib_pd *pd, 2797 struct ib_qp_init_attr *qp_init_attr); 2798 2799 /** 2800 * ib_modify_qp - Modifies the attributes for the specified QP and then 2801 * transitions the QP to the given state. 2802 * @qp: The QP to modify. 2803 * @qp_attr: On input, specifies the QP attributes to modify. On output, 2804 * the current values of selected QP attributes are returned. 2805 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP 2806 * are being modified. 2807 */ 2808 int ib_modify_qp(struct ib_qp *qp, 2809 struct ib_qp_attr *qp_attr, 2810 int qp_attr_mask); 2811 2812 /** 2813 * ib_query_qp - Returns the attribute list and current values for the 2814 * specified QP. 2815 * @qp: The QP to query. 2816 * @qp_attr: The attributes of the specified QP. 2817 * @qp_attr_mask: A bit-mask used to select specific attributes to query. 2818 * @qp_init_attr: Additional attributes of the selected QP. 2819 * 2820 * The qp_attr_mask may be used to limit the query to gathering only the 2821 * selected attributes. 2822 */ 2823 int ib_query_qp(struct ib_qp *qp, 2824 struct ib_qp_attr *qp_attr, 2825 int qp_attr_mask, 2826 struct ib_qp_init_attr *qp_init_attr); 2827 2828 /** 2829 * ib_destroy_qp - Destroys the specified QP. 2830 * @qp: The QP to destroy. 2831 */ 2832 int ib_destroy_qp(struct ib_qp *qp); 2833 2834 /** 2835 * ib_open_qp - Obtain a reference to an existing sharable QP. 2836 * @xrcd - XRC domain 2837 * @qp_open_attr: Attributes identifying the QP to open. 2838 * 2839 * Returns a reference to a sharable QP. 2840 */ 2841 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, 2842 struct ib_qp_open_attr *qp_open_attr); 2843 2844 /** 2845 * ib_close_qp - Release an external reference to a QP. 2846 * @qp: The QP handle to release 2847 * 2848 * The opened QP handle is released by the caller. The underlying 2849 * shared QP is not destroyed until all internal references are released. 2850 */ 2851 int ib_close_qp(struct ib_qp *qp); 2852 2853 /** 2854 * ib_post_send - Posts a list of work requests to the send queue of 2855 * the specified QP. 2856 * @qp: The QP to post the work request on. 2857 * @send_wr: A list of work requests to post on the send queue. 2858 * @bad_send_wr: On an immediate failure, this parameter will reference 2859 * the work request that failed to be posted on the QP. 2860 * 2861 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate 2862 * error is returned, the QP state shall not be affected, 2863 * ib_post_send() will return an immediate error after queueing any 2864 * earlier work requests in the list. 2865 */ 2866 static inline int ib_post_send(struct ib_qp *qp, 2867 const struct ib_send_wr *send_wr, 2868 const struct ib_send_wr **bad_send_wr) 2869 { 2870 return qp->device->post_send(qp, send_wr, bad_send_wr); 2871 } 2872 2873 /** 2874 * ib_post_recv - Posts a list of work requests to the receive queue of 2875 * the specified QP. 2876 * @qp: The QP to post the work request on. 2877 * @recv_wr: A list of work requests to post on the receive queue. 2878 * @bad_recv_wr: On an immediate failure, this parameter will reference 2879 * the work request that failed to be posted on the QP. 2880 */ 2881 static inline int ib_post_recv(struct ib_qp *qp, 2882 const struct ib_recv_wr *recv_wr, 2883 const struct ib_recv_wr **bad_recv_wr) 2884 { 2885 return qp->device->post_recv(qp, recv_wr, bad_recv_wr); 2886 } 2887 2888 struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, 2889 int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx); 2890 void ib_free_cq(struct ib_cq *cq); 2891 2892 /** 2893 * ib_create_cq - Creates a CQ on the specified device. 2894 * @device: The device on which to create the CQ. 2895 * @comp_handler: A user-specified callback that is invoked when a 2896 * completion event occurs on the CQ. 2897 * @event_handler: A user-specified callback that is invoked when an 2898 * asynchronous event not associated with a completion occurs on the CQ. 2899 * @cq_context: Context associated with the CQ returned to the user via 2900 * the associated completion and event handlers. 2901 * @cq_attr: The attributes the CQ should be created upon. 2902 * 2903 * Users can examine the cq structure to determine the actual CQ size. 2904 */ 2905 struct ib_cq *ib_create_cq(struct ib_device *device, 2906 ib_comp_handler comp_handler, 2907 void (*event_handler)(struct ib_event *, void *), 2908 void *cq_context, 2909 const struct ib_cq_init_attr *cq_attr); 2910 2911 /** 2912 * ib_resize_cq - Modifies the capacity of the CQ. 2913 * @cq: The CQ to resize. 2914 * @cqe: The minimum size of the CQ. 2915 * 2916 * Users can examine the cq structure to determine the actual CQ size. 2917 */ 2918 int ib_resize_cq(struct ib_cq *cq, int cqe); 2919 2920 /** 2921 * ib_modify_cq - Modifies moderation params of the CQ 2922 * @cq: The CQ to modify. 2923 * @cq_count: number of CQEs that will trigger an event 2924 * @cq_period: max period of time in usec before triggering an event 2925 * 2926 */ 2927 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); 2928 2929 /** 2930 * ib_destroy_cq - Destroys the specified CQ. 2931 * @cq: The CQ to destroy. 2932 */ 2933 int ib_destroy_cq(struct ib_cq *cq); 2934 2935 /** 2936 * ib_poll_cq - poll a CQ for completion(s) 2937 * @cq:the CQ being polled 2938 * @num_entries:maximum number of completions to return 2939 * @wc:array of at least @num_entries &struct ib_wc where completions 2940 * will be returned 2941 * 2942 * Poll a CQ for (possibly multiple) completions. If the return value 2943 * is < 0, an error occurred. If the return value is >= 0, it is the 2944 * number of completions returned. If the return value is 2945 * non-negative and < num_entries, then the CQ was emptied. 2946 */ 2947 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, 2948 struct ib_wc *wc) 2949 { 2950 return cq->device->poll_cq(cq, num_entries, wc); 2951 } 2952 2953 /** 2954 * ib_peek_cq - Returns the number of unreaped completions currently 2955 * on the specified CQ. 2956 * @cq: The CQ to peek. 2957 * @wc_cnt: A minimum number of unreaped completions to check for. 2958 * 2959 * If the number of unreaped completions is greater than or equal to wc_cnt, 2960 * this function returns wc_cnt, otherwise, it returns the actual number of 2961 * unreaped completions. 2962 */ 2963 int ib_peek_cq(struct ib_cq *cq, int wc_cnt); 2964 2965 /** 2966 * ib_req_notify_cq - Request completion notification on a CQ. 2967 * @cq: The CQ to generate an event for. 2968 * @flags: 2969 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP 2970 * to request an event on the next solicited event or next work 2971 * completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS 2972 * may also be |ed in to request a hint about missed events, as 2973 * described below. 2974 * 2975 * Return Value: 2976 * < 0 means an error occurred while requesting notification 2977 * == 0 means notification was requested successfully, and if 2978 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events 2979 * were missed and it is safe to wait for another event. In 2980 * this case is it guaranteed that any work completions added 2981 * to the CQ since the last CQ poll will trigger a completion 2982 * notification event. 2983 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed 2984 * in. It means that the consumer must poll the CQ again to 2985 * make sure it is empty to avoid missing an event because of a 2986 * race between requesting notification and an entry being 2987 * added to the CQ. This return value means it is possible 2988 * (but not guaranteed) that a work completion has been added 2989 * to the CQ since the last poll without triggering a 2990 * completion notification event. 2991 */ 2992 static inline int ib_req_notify_cq(struct ib_cq *cq, 2993 enum ib_cq_notify_flags flags) 2994 { 2995 return cq->device->req_notify_cq(cq, flags); 2996 } 2997 2998 /** 2999 * ib_req_ncomp_notif - Request completion notification when there are 3000 * at least the specified number of unreaped completions on the CQ. 3001 * @cq: The CQ to generate an event for. 3002 * @wc_cnt: The number of unreaped completions that should be on the 3003 * CQ before an event is generated. 3004 */ 3005 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) 3006 { 3007 return cq->device->req_ncomp_notif ? 3008 cq->device->req_ncomp_notif(cq, wc_cnt) : 3009 -ENOSYS; 3010 } 3011 3012 /** 3013 * ib_dma_mapping_error - check a DMA addr for error 3014 * @dev: The device for which the dma_addr was created 3015 * @dma_addr: The DMA address to check 3016 */ 3017 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 3018 { 3019 if (dev->dma_ops) 3020 return dev->dma_ops->mapping_error(dev, dma_addr); 3021 return dma_mapping_error(dev->dma_device, dma_addr); 3022 } 3023 3024 /** 3025 * ib_dma_map_single - Map a kernel virtual address to DMA address 3026 * @dev: The device for which the dma_addr is to be created 3027 * @cpu_addr: The kernel virtual address 3028 * @size: The size of the region in bytes 3029 * @direction: The direction of the DMA 3030 */ 3031 static inline u64 ib_dma_map_single(struct ib_device *dev, 3032 void *cpu_addr, size_t size, 3033 enum dma_data_direction direction) 3034 { 3035 if (dev->dma_ops) 3036 return dev->dma_ops->map_single(dev, cpu_addr, size, direction); 3037 return dma_map_single(dev->dma_device, cpu_addr, size, direction); 3038 } 3039 3040 /** 3041 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() 3042 * @dev: The device for which the DMA address was created 3043 * @addr: The DMA address 3044 * @size: The size of the region in bytes 3045 * @direction: The direction of the DMA 3046 */ 3047 static inline void ib_dma_unmap_single(struct ib_device *dev, 3048 u64 addr, size_t size, 3049 enum dma_data_direction direction) 3050 { 3051 if (dev->dma_ops) 3052 dev->dma_ops->unmap_single(dev, addr, size, direction); 3053 else 3054 dma_unmap_single(dev->dma_device, addr, size, direction); 3055 } 3056 3057 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, 3058 void *cpu_addr, size_t size, 3059 enum dma_data_direction direction, 3060 struct dma_attrs *dma_attrs) 3061 { 3062 return dma_map_single_attrs(dev->dma_device, cpu_addr, size, 3063 direction, dma_attrs); 3064 } 3065 3066 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, 3067 u64 addr, size_t size, 3068 enum dma_data_direction direction, 3069 struct dma_attrs *dma_attrs) 3070 { 3071 return dma_unmap_single_attrs(dev->dma_device, addr, size, 3072 direction, dma_attrs); 3073 } 3074 3075 /** 3076 * ib_dma_map_page - Map a physical page to DMA address 3077 * @dev: The device for which the dma_addr is to be created 3078 * @page: The page to be mapped 3079 * @offset: The offset within the page 3080 * @size: The size of the region in bytes 3081 * @direction: The direction of the DMA 3082 */ 3083 static inline u64 ib_dma_map_page(struct ib_device *dev, 3084 struct page *page, 3085 unsigned long offset, 3086 size_t size, 3087 enum dma_data_direction direction) 3088 { 3089 if (dev->dma_ops) 3090 return dev->dma_ops->map_page(dev, page, offset, size, direction); 3091 return dma_map_page(dev->dma_device, page, offset, size, direction); 3092 } 3093 3094 /** 3095 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() 3096 * @dev: The device for which the DMA address was created 3097 * @addr: The DMA address 3098 * @size: The size of the region in bytes 3099 * @direction: The direction of the DMA 3100 */ 3101 static inline void ib_dma_unmap_page(struct ib_device *dev, 3102 u64 addr, size_t size, 3103 enum dma_data_direction direction) 3104 { 3105 if (dev->dma_ops) 3106 dev->dma_ops->unmap_page(dev, addr, size, direction); 3107 else 3108 dma_unmap_page(dev->dma_device, addr, size, direction); 3109 } 3110 3111 /** 3112 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses 3113 * @dev: The device for which the DMA addresses are to be created 3114 * @sg: The array of scatter/gather entries 3115 * @nents: The number of scatter/gather entries 3116 * @direction: The direction of the DMA 3117 */ 3118 static inline int ib_dma_map_sg(struct ib_device *dev, 3119 struct scatterlist *sg, int nents, 3120 enum dma_data_direction direction) 3121 { 3122 if (dev->dma_ops) 3123 return dev->dma_ops->map_sg(dev, sg, nents, direction); 3124 return dma_map_sg(dev->dma_device, sg, nents, direction); 3125 } 3126 3127 /** 3128 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses 3129 * @dev: The device for which the DMA addresses were created 3130 * @sg: The array of scatter/gather entries 3131 * @nents: The number of scatter/gather entries 3132 * @direction: The direction of the DMA 3133 */ 3134 static inline void ib_dma_unmap_sg(struct ib_device *dev, 3135 struct scatterlist *sg, int nents, 3136 enum dma_data_direction direction) 3137 { 3138 if (dev->dma_ops) 3139 dev->dma_ops->unmap_sg(dev, sg, nents, direction); 3140 else 3141 dma_unmap_sg(dev->dma_device, sg, nents, direction); 3142 } 3143 3144 static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 3145 struct scatterlist *sg, int nents, 3146 enum dma_data_direction direction, 3147 struct dma_attrs *dma_attrs) 3148 { 3149 if (dev->dma_ops) 3150 return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction, 3151 dma_attrs); 3152 else 3153 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, 3154 dma_attrs); 3155 } 3156 3157 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 3158 struct scatterlist *sg, int nents, 3159 enum dma_data_direction direction, 3160 struct dma_attrs *dma_attrs) 3161 { 3162 if (dev->dma_ops) 3163 return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction, 3164 dma_attrs); 3165 else 3166 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, 3167 dma_attrs); 3168 } 3169 /** 3170 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 3171 * @dev: The device for which the DMA addresses were created 3172 * @sg: The scatter/gather entry 3173 * 3174 * Note: this function is obsolete. To do: change all occurrences of 3175 * ib_sg_dma_address() into sg_dma_address(). 3176 */ 3177 static inline u64 ib_sg_dma_address(struct ib_device *dev, 3178 struct scatterlist *sg) 3179 { 3180 return sg_dma_address(sg); 3181 } 3182 3183 /** 3184 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry 3185 * @dev: The device for which the DMA addresses were created 3186 * @sg: The scatter/gather entry 3187 * 3188 * Note: this function is obsolete. To do: change all occurrences of 3189 * ib_sg_dma_len() into sg_dma_len(). 3190 */ 3191 static inline unsigned int ib_sg_dma_len(struct ib_device *dev, 3192 struct scatterlist *sg) 3193 { 3194 return sg_dma_len(sg); 3195 } 3196 3197 /** 3198 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU 3199 * @dev: The device for which the DMA address was created 3200 * @addr: The DMA address 3201 * @size: The size of the region in bytes 3202 * @dir: The direction of the DMA 3203 */ 3204 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, 3205 u64 addr, 3206 size_t size, 3207 enum dma_data_direction dir) 3208 { 3209 if (dev->dma_ops) 3210 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir); 3211 else 3212 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); 3213 } 3214 3215 /** 3216 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device 3217 * @dev: The device for which the DMA address was created 3218 * @addr: The DMA address 3219 * @size: The size of the region in bytes 3220 * @dir: The direction of the DMA 3221 */ 3222 static inline void ib_dma_sync_single_for_device(struct ib_device *dev, 3223 u64 addr, 3224 size_t size, 3225 enum dma_data_direction dir) 3226 { 3227 if (dev->dma_ops) 3228 dev->dma_ops->sync_single_for_device(dev, addr, size, dir); 3229 else 3230 dma_sync_single_for_device(dev->dma_device, addr, size, dir); 3231 } 3232 3233 /** 3234 * ib_dma_alloc_coherent - Allocate memory and map it for DMA 3235 * @dev: The device for which the DMA address is requested 3236 * @size: The size of the region to allocate in bytes 3237 * @dma_handle: A pointer for returning the DMA address of the region 3238 * @flag: memory allocator flags 3239 */ 3240 static inline void *ib_dma_alloc_coherent(struct ib_device *dev, 3241 size_t size, 3242 u64 *dma_handle, 3243 gfp_t flag) 3244 { 3245 if (dev->dma_ops) 3246 return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag); 3247 else { 3248 dma_addr_t handle; 3249 void *ret; 3250 3251 ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag); 3252 *dma_handle = handle; 3253 return ret; 3254 } 3255 } 3256 3257 /** 3258 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent() 3259 * @dev: The device for which the DMA addresses were allocated 3260 * @size: The size of the region 3261 * @cpu_addr: the address returned by ib_dma_alloc_coherent() 3262 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent() 3263 */ 3264 static inline void ib_dma_free_coherent(struct ib_device *dev, 3265 size_t size, void *cpu_addr, 3266 u64 dma_handle) 3267 { 3268 if (dev->dma_ops) 3269 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); 3270 else 3271 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); 3272 } 3273 3274 /** 3275 * ib_dereg_mr - Deregisters a memory region and removes it from the 3276 * HCA translation table. 3277 * @mr: The memory region to deregister. 3278 * 3279 * This function can fail, if the memory region has memory windows bound to it. 3280 */ 3281 int ib_dereg_mr(struct ib_mr *mr); 3282 3283 struct ib_mr *ib_alloc_mr(struct ib_pd *pd, 3284 enum ib_mr_type mr_type, 3285 u32 max_num_sg); 3286 3287 /** 3288 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR 3289 * R_Key and L_Key. 3290 * @mr - struct ib_mr pointer to be updated. 3291 * @newkey - new key to be used. 3292 */ 3293 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey) 3294 { 3295 mr->lkey = (mr->lkey & 0xffffff00) | newkey; 3296 mr->rkey = (mr->rkey & 0xffffff00) | newkey; 3297 } 3298 3299 /** 3300 * ib_inc_rkey - increments the key portion of the given rkey. Can be used 3301 * for calculating a new rkey for type 2 memory windows. 3302 * @rkey - the rkey to increment. 3303 */ 3304 static inline u32 ib_inc_rkey(u32 rkey) 3305 { 3306 const u32 mask = 0x000000ff; 3307 return ((rkey + 1) & mask) | (rkey & ~mask); 3308 } 3309 3310 /** 3311 * ib_alloc_fmr - Allocates a unmapped fast memory region. 3312 * @pd: The protection domain associated with the unmapped region. 3313 * @mr_access_flags: Specifies the memory access rights. 3314 * @fmr_attr: Attributes of the unmapped region. 3315 * 3316 * A fast memory region must be mapped before it can be used as part of 3317 * a work request. 3318 */ 3319 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, 3320 int mr_access_flags, 3321 struct ib_fmr_attr *fmr_attr); 3322 3323 /** 3324 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region. 3325 * @fmr: The fast memory region to associate with the pages. 3326 * @page_list: An array of physical pages to map to the fast memory region. 3327 * @list_len: The number of pages in page_list. 3328 * @iova: The I/O virtual address to use with the mapped region. 3329 */ 3330 static inline int ib_map_phys_fmr(struct ib_fmr *fmr, 3331 u64 *page_list, int list_len, 3332 u64 iova) 3333 { 3334 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); 3335 } 3336 3337 /** 3338 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions. 3339 * @fmr_list: A linked list of fast memory regions to unmap. 3340 */ 3341 int ib_unmap_fmr(struct list_head *fmr_list); 3342 3343 /** 3344 * ib_dealloc_fmr - Deallocates a fast memory region. 3345 * @fmr: The fast memory region to deallocate. 3346 */ 3347 int ib_dealloc_fmr(struct ib_fmr *fmr); 3348 3349 /** 3350 * ib_attach_mcast - Attaches the specified QP to a multicast group. 3351 * @qp: QP to attach to the multicast group. The QP must be type 3352 * IB_QPT_UD. 3353 * @gid: Multicast group GID. 3354 * @lid: Multicast group LID in host byte order. 3355 * 3356 * In order to send and receive multicast packets, subnet 3357 * administration must have created the multicast group and configured 3358 * the fabric appropriately. The port associated with the specified 3359 * QP must also be a member of the multicast group. 3360 */ 3361 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 3362 3363 /** 3364 * ib_detach_mcast - Detaches the specified QP from a multicast group. 3365 * @qp: QP to detach from the multicast group. 3366 * @gid: Multicast group GID. 3367 * @lid: Multicast group LID in host byte order. 3368 */ 3369 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 3370 3371 /** 3372 * ib_alloc_xrcd - Allocates an XRC domain. 3373 * @device: The device on which to allocate the XRC domain. 3374 */ 3375 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device); 3376 3377 /** 3378 * ib_dealloc_xrcd - Deallocates an XRC domain. 3379 * @xrcd: The XRC domain to deallocate. 3380 */ 3381 int ib_dealloc_xrcd(struct ib_xrcd *xrcd); 3382 3383 struct ib_flow *ib_create_flow(struct ib_qp *qp, 3384 struct ib_flow_attr *flow_attr, int domain); 3385 int ib_destroy_flow(struct ib_flow *flow_id); 3386 3387 static inline int ib_check_mr_access(int flags) 3388 { 3389 /* 3390 * Local write permission is required if remote write or 3391 * remote atomic permission is also requested. 3392 */ 3393 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && 3394 !(flags & IB_ACCESS_LOCAL_WRITE)) 3395 return -EINVAL; 3396 3397 return 0; 3398 } 3399 3400 /** 3401 * ib_check_mr_status: lightweight check of MR status. 3402 * This routine may provide status checks on a selected 3403 * ib_mr. first use is for signature status check. 3404 * 3405 * @mr: A memory region. 3406 * @check_mask: Bitmask of which checks to perform from 3407 * ib_mr_status_check enumeration. 3408 * @mr_status: The container of relevant status checks. 3409 * failed checks will be indicated in the status bitmask 3410 * and the relevant info shall be in the error item. 3411 */ 3412 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, 3413 struct ib_mr_status *mr_status); 3414 3415 struct ifnet *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, 3416 u16 pkey, const union ib_gid *gid, 3417 const struct sockaddr *addr); 3418 struct ib_wq *ib_create_wq(struct ib_pd *pd, 3419 struct ib_wq_init_attr *init_attr); 3420 int ib_destroy_wq(struct ib_wq *wq); 3421 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr, 3422 u32 wq_attr_mask); 3423 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, 3424 struct ib_rwq_ind_table_init_attr* 3425 wq_ind_table_init_attr); 3426 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); 3427 3428 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 3429 unsigned int *sg_offset, unsigned int page_size); 3430 3431 static inline int 3432 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 3433 unsigned int *sg_offset, unsigned int page_size) 3434 { 3435 int n; 3436 3437 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size); 3438 mr->iova = 0; 3439 3440 return n; 3441 } 3442 3443 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, 3444 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64)); 3445 3446 void ib_drain_rq(struct ib_qp *qp); 3447 void ib_drain_sq(struct ib_qp *qp); 3448 void ib_drain_qp(struct ib_qp *qp); 3449 3450 struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile); 3451 3452 int ib_resolve_eth_dmac(struct ib_device *device, 3453 struct ib_ah_attr *ah_attr); 3454 #endif /* IB_VERBS_H */ 3455