/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
*/

#ifndef _CORE_PRIV_H
#define _CORE_PRIV_H

#include <linux/list.h>
#include <linux/spinlock.h>

#include <rdma/ib_verbs.h>

#include <net/if_vlan_var.h>

/*
 * RDMA CM configfs entry points.  When configfs support is compiled
 * out, init/exit collapse to no-op inline stubs so callers need no
 * #ifdefs of their own.
 */
#ifdef CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS
int cma_configfs_init(void);
void cma_configfs_exit(void);
#else
static inline int cma_configfs_init(void)
{
	/* Configfs support disabled: nothing to set up, report success. */
	return 0;
}

static inline void cma_configfs_exit(void)
{
	/* Configfs support disabled: nothing to tear down. */
}
#endif

/* Opaque per-device RDMA CM state; defined by the CM implementation. */
struct cma_device;
/* Take/drop a reference on a cma_device (per their names — see the CM code). */
void cma_ref_dev(struct cma_device *cma_dev);
void cma_deref_dev(struct cma_device *cma_dev);
/* Predicate used to select an ib_device during enumeration; cookie is opaque. */
typedef bool (*cma_device_filter)(struct ib_device *, void *);
struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
					     void *cookie);
/* Get/set the default GID type of one port of a cma_device. */
int cma_get_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port);
int cma_set_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port,
			     enum ib_gid_type default_gid_type);
struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev);

/*
 * Sysfs registration for an IB device.  port_callback, if non-NULL, is
 * invoked with a port number and the port's kobject.
 */
int ib_device_register_sysfs(struct ib_device *device,
			     int (*port_callback)(struct ib_device *,
						  u8, struct kobject *));
void ib_device_unregister_sysfs(struct ib_device *device);

/* Global init/teardown of the IB cache subsystem (ib_cache_*). */
void ib_cache_setup(void);
void ib_cache_cleanup(void);

/* Resolve the Ethernet destination MAC for a QP's attributes (RoCE). */
int ib_resolve_eth_dmac(struct ib_qp *qp,
			struct ib_qp_attr *qp_attr, int *qp_attr_mask);

/*
 * Callback/filter pair used when walking RoCE-capable net devices:
 * the filter selects (device, port, netdev) tuples, the callback is
 * run for each tuple that passes.  Both cookies are opaque.
 */
typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
				     struct net_device *idev, void *cookie);

typedef int (*roce_netdev_filter)(struct ib_device *device, u8 port,
				  struct net_device *idev, void *cookie);

/* Enumerate matching net devices of one IB device, or of all of them. */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie);
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie);

/* Whether a default-GID cache operation sets or deletes the entry. */
enum ib_cache_gid_default_mode {
	IB_CACHE_GID_DEFAULT_MODE_SET,
	IB_CACHE_GID_DEFAULT_MODE_DELETE
};

101 int ib_cache_gid_parse_type_str(const char *buf); 102 103 const char *ib_cache_gid_type_str(enum ib_gid_type gid_type); 104 105 void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, 106 struct net_device *ndev, 107 unsigned long gid_type_mask, 108 enum ib_cache_gid_default_mode mode); 109 110 int ib_cache_gid_add(struct ib_device *ib_dev, u8 port, 111 union ib_gid *gid, struct ib_gid_attr *attr); 112 113 int ib_cache_gid_del(struct ib_device *ib_dev, u8 port, 114 union ib_gid *gid, struct ib_gid_attr *attr); 115 116 int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, 117 struct net_device *ndev); 118 void ib_cache_gid_del_all_by_netdev(struct net_device *ndev); 119 120 int roce_gid_mgmt_init(void); 121 void roce_gid_mgmt_cleanup(void); 122 123 int roce_rescan_device(struct ib_device *ib_dev); 124 unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port); 125 126 int ib_cache_setup_one(struct ib_device *device); 127 void ib_cache_cleanup_one(struct ib_device *device); 128 void ib_cache_release_one(struct ib_device *device); 129 130 static inline bool rdma_is_upper_dev_rcu(struct net_device *dev, 131 struct net_device *upper) 132 { 133 134 /* TODO: add support for LAGG */ 135 upper = VLAN_TRUNKDEV(upper); 136 137 return (dev == upper); 138 } 139 140 int addr_init(void); 141 void addr_cleanup(void); 142 143 int ib_mad_init(void); 144 void ib_mad_cleanup(void); 145 146 int ib_sa_init(void); 147 void ib_sa_cleanup(void); 148 149 #endif /* _CORE_PRIV_H */ 150