// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies */

#include <linux/mlx5/vport.h>
#include <rdma/ib_verbs.h>
#include <net/addrconf.h>

#include "lib/mlx5.h"
#include "eswitch.h"
#include "fs_core.h"
#include "rdma.h"

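/*
 * Tear down the RoCE steering objects created by
 * mlx5_rdma_enable_roce_steering(), in reverse order of creation:
 * allow rule, then flow group, then flow table.
 */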
static void mlx5_rdma_disable_roce_steering(struct mlx5_core_dev *dev)
{
	struct mlx5_core_roce *roce = &dev->priv.roce;

	mlx5_del_flow_rules(roce->allow_rule);
	mlx5_destroy_flow_group(roce->fg);
	mlx5_destroy_flow_table(roce->ft);
}

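/*
 * Install a minimal RDMA RX steering pipeline: a single-entry flow table in
 * the RDMA RX kernel namespace, one flow group matching on source vport, and
 * an ALLOW rule for traffic coming from the eswitch manager vport. Requires
 * RDMA RX flow-table support and a table miss action domain on the device.
 */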
static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_roce *roce = &dev->priv.roce;
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_eswitch *esw;
	u32 *flow_group_in;
	int err;

	if (!(MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
	      MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)))
		return -EOPNOTSUPP;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		kvfree(flow_group_in);
		return -ENOMEM;
	}

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL);
	if (!ns) {
		mlx5_core_err(dev, "Failed to get RDMA RX namespace\n");
		err = -EOPNOTSUPP;
		goto free;
	}

	ft_attr.max_fte = 1;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_err(dev, "Failed to create RDMA RX flow table\n");
		err = PTR_ERR(ft);
		goto free;
	}

	esw = dev->priv.eswitch;
	mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);

	fg = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(fg)) {
		err = PTR_ERR(fg);
		mlx5_core_err(dev, "Failed to create RDMA RX flow group err(%d)\n", err);
		goto destroy_flow_table;
	}

	mlx5_esw_set_spec_source_port(esw, esw->manager_vport, spec);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		mlx5_core_err(dev, "Failed to add RoCE allow rule, err=%d\n",
			      err);
		goto destroy_flow_group;
	}

	kvfree(spec);
	kvfree(flow_group_in);
	roce->ft = ft;
	roce->fg = fg;
	roce->allow_rule = flow_rule;

	return 0;

destroy_flow_group:
	mlx5_destroy_flow_group(fg);
destroy_flow_table:
	mlx5_destroy_flow_table(ft);
free:
	kvfree(spec);
	kvfree(flow_group_in);
	return err;
}

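/* Clear the default RoCE GID at index 0 of port 1. */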
static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
{
	mlx5_core_roce_gid_set(dev, 0, MLX5_ROCE_VERSION_2, 0,
			       NULL, NULL, false, 0, 1);
}

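/*
 * Build the default link-local (fe80::/64) GID from the device MAC address
 * using the EUI-48 to EUI-64 mapping.
 */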
static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
{
	u8 hw_id[ETH_ALEN];

	mlx5_query_mac_address(dev, hw_id);
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_addr_eui48(&gid->raw[8], hw_id);
}

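/* Program the default RoCEv2 GID at index 0 of port 1. */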
static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
{
	union ib_gid gid;
	u8 mac[ETH_ALEN];

	mlx5_rdma_make_default_gid(dev, &gid);
	return mlx5_core_roce_gid_set(dev, 0,
				      MLX5_ROCE_VERSION_2,
				      0, gid.raw, mac,
				      false, 0, 1);
}

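/*
 * Undo mlx5_rdma_enable_roce(). roce->ft serves as the indication that RoCE
 * was fully enabled; if it was never set there is nothing to tear down.
 */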
void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
{
	struct mlx5_core_roce *roce = &dev->priv.roce;

	if (!roce->ft)
		return;

	mlx5_rdma_disable_roce_steering(dev);
	mlx5_rdma_del_roce_addr(dev);
	mlx5_nic_vport_disable_roce(dev);
}

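/*
 * Enable RoCE on the NIC vport, program the default GID and install the
 * steering allow rule. On failure, already-completed steps are unwound and
 * the device is left with RoCE disabled.
 */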
void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
{
	int err;

	if (!MLX5_CAP_GEN(dev, roce))
		return;

	err = mlx5_nic_vport_enable_roce(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
		return;
	}

	err = mlx5_rdma_add_roce_addr(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to add RoCE address: %d\n", err);
		goto disable_roce;
	}

	err = mlx5_rdma_enable_roce_steering(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to enable RoCE steering: %d\n", err);
		goto del_roce_addr;
	}

	return;

del_roce_addr:
	mlx5_rdma_del_roce_addr(dev);
disable_roce:
	mlx5_nic_vport_disable_roce(dev);
}