1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26 
27 #define HCLGE_NAME			"hclge"
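/* Read a u64 statistic from a stats structure at the given byte offset;
 * used together with HCLGE_MAC_STATS_FIELD_OFF() below to walk the
 * g_mac_stats_string table generically.
 */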
28 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
30 
31 #define HCLGE_BUF_SIZE_UNIT	256U
32 #define HCLGE_BUF_MUL_BY	2
33 #define HCLGE_BUF_DIV_BY	2
34 #define NEED_RESERVE_TC_NUM	2
35 #define BUF_MAX_PERCENT		100
36 #define BUF_RESERVE_PERCENT	90
37 
38 #define HCLGE_RESET_MAX_FAIL_CNT	5
39 #define HCLGE_RESET_SYNC_TIME		100
40 #define HCLGE_PF_RESET_SYNC_TIME	20
41 #define HCLGE_PF_RESET_SYNC_CNT		1500
42 
43 /* Get DFX BD number offset */
44 #define HCLGE_DFX_BIOS_BD_OFFSET        1
45 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
46 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
47 #define HCLGE_DFX_IGU_BD_OFFSET         4
48 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
49 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
50 #define HCLGE_DFX_NCSI_BD_OFFSET        7
51 #define HCLGE_DFX_RTC_BD_OFFSET         8
52 #define HCLGE_DFX_PPP_BD_OFFSET         9
53 #define HCLGE_DFX_RCB_BD_OFFSET         10
54 #define HCLGE_DFX_TQP_BD_OFFSET         11
55 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
56 
57 #define HCLGE_LINK_STATUS_MS	10
58 
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
62 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
63 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
66 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
67 						   unsigned long *addr);
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
69 
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
73 static void hclge_sync_fd_table(struct hclge_dev *hdev);
74 
75 static struct hnae3_ae_algo ae_algo;
76 
77 static struct workqueue_struct *hclge_wq;
78 
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88 	/* required last entry */
89 	{0, }
90 };
91 
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93 
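/* The register address lists below are read back when dumping device
 * registers (e.g. via the ethtool register dump).
 */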
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95 					 HCLGE_CMDQ_TX_ADDR_H_REG,
96 					 HCLGE_CMDQ_TX_DEPTH_REG,
97 					 HCLGE_CMDQ_TX_TAIL_REG,
98 					 HCLGE_CMDQ_TX_HEAD_REG,
99 					 HCLGE_CMDQ_RX_ADDR_L_REG,
100 					 HCLGE_CMDQ_RX_ADDR_H_REG,
101 					 HCLGE_CMDQ_RX_DEPTH_REG,
102 					 HCLGE_CMDQ_RX_TAIL_REG,
103 					 HCLGE_CMDQ_RX_HEAD_REG,
104 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
105 					 HCLGE_CMDQ_INTR_STS_REG,
106 					 HCLGE_CMDQ_INTR_EN_REG,
107 					 HCLGE_CMDQ_INTR_GEN_REG};
108 
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110 					   HCLGE_VECTOR0_OTER_EN_REG,
111 					   HCLGE_MISC_RESET_STS_REG,
112 					   HCLGE_MISC_VECTOR_INT_STS,
113 					   HCLGE_GLOBAL_RESET_REG,
114 					   HCLGE_FUN_RST_ING,
115 					   HCLGE_GRO_EN_REG};
116 
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118 					 HCLGE_RING_RX_ADDR_H_REG,
119 					 HCLGE_RING_RX_BD_NUM_REG,
120 					 HCLGE_RING_RX_BD_LENGTH_REG,
121 					 HCLGE_RING_RX_MERGE_EN_REG,
122 					 HCLGE_RING_RX_TAIL_REG,
123 					 HCLGE_RING_RX_HEAD_REG,
124 					 HCLGE_RING_RX_FBD_NUM_REG,
125 					 HCLGE_RING_RX_OFFSET_REG,
126 					 HCLGE_RING_RX_FBD_OFFSET_REG,
127 					 HCLGE_RING_RX_STASH_REG,
128 					 HCLGE_RING_RX_BD_ERR_REG,
129 					 HCLGE_RING_TX_ADDR_L_REG,
130 					 HCLGE_RING_TX_ADDR_H_REG,
131 					 HCLGE_RING_TX_BD_NUM_REG,
132 					 HCLGE_RING_TX_PRIORITY_REG,
133 					 HCLGE_RING_TX_TC_REG,
134 					 HCLGE_RING_TX_MERGE_EN_REG,
135 					 HCLGE_RING_TX_TAIL_REG,
136 					 HCLGE_RING_TX_HEAD_REG,
137 					 HCLGE_RING_TX_FBD_NUM_REG,
138 					 HCLGE_RING_TX_OFFSET_REG,
139 					 HCLGE_RING_TX_EBD_NUM_REG,
140 					 HCLGE_RING_TX_EBD_OFFSET_REG,
141 					 HCLGE_RING_TX_BD_ERR_REG,
142 					 HCLGE_RING_EN_REG};
143 
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145 					     HCLGE_TQP_INTR_GL0_REG,
146 					     HCLGE_TQP_INTR_GL1_REG,
147 					     HCLGE_TQP_INTR_GL2_REG,
148 					     HCLGE_TQP_INTR_RL_REG};
149 
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
151 	"App    Loopback test",
152 	"Serdes serial Loopback test",
153 	"Serdes parallel Loopback test",
154 	"Phy    Loopback test"
155 };
156 
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 	{"mac_tx_mac_pause_num",
159 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 	{"mac_rx_mac_pause_num",
161 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162 	{"mac_tx_control_pkt_num",
163 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164 	{"mac_rx_control_pkt_num",
165 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166 	{"mac_tx_pfc_pkt_num",
167 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168 	{"mac_tx_pfc_pri0_pkt_num",
169 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170 	{"mac_tx_pfc_pri1_pkt_num",
171 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172 	{"mac_tx_pfc_pri2_pkt_num",
173 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174 	{"mac_tx_pfc_pri3_pkt_num",
175 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176 	{"mac_tx_pfc_pri4_pkt_num",
177 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178 	{"mac_tx_pfc_pri5_pkt_num",
179 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180 	{"mac_tx_pfc_pri6_pkt_num",
181 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182 	{"mac_tx_pfc_pri7_pkt_num",
183 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184 	{"mac_rx_pfc_pkt_num",
185 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186 	{"mac_rx_pfc_pri0_pkt_num",
187 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188 	{"mac_rx_pfc_pri1_pkt_num",
189 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190 	{"mac_rx_pfc_pri2_pkt_num",
191 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192 	{"mac_rx_pfc_pri3_pkt_num",
193 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194 	{"mac_rx_pfc_pri4_pkt_num",
195 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196 	{"mac_rx_pfc_pri5_pkt_num",
197 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198 	{"mac_rx_pfc_pri6_pkt_num",
199 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200 	{"mac_rx_pfc_pri7_pkt_num",
201 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202 	{"mac_tx_total_pkt_num",
203 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204 	{"mac_tx_total_oct_num",
205 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206 	{"mac_tx_good_pkt_num",
207 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208 	{"mac_tx_bad_pkt_num",
209 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210 	{"mac_tx_good_oct_num",
211 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212 	{"mac_tx_bad_oct_num",
213 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214 	{"mac_tx_uni_pkt_num",
215 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216 	{"mac_tx_multi_pkt_num",
217 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218 	{"mac_tx_broad_pkt_num",
219 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220 	{"mac_tx_undersize_pkt_num",
221 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222 	{"mac_tx_oversize_pkt_num",
223 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224 	{"mac_tx_64_oct_pkt_num",
225 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226 	{"mac_tx_65_127_oct_pkt_num",
227 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228 	{"mac_tx_128_255_oct_pkt_num",
229 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230 	{"mac_tx_256_511_oct_pkt_num",
231 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232 	{"mac_tx_512_1023_oct_pkt_num",
233 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234 	{"mac_tx_1024_1518_oct_pkt_num",
235 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236 	{"mac_tx_1519_2047_oct_pkt_num",
237 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238 	{"mac_tx_2048_4095_oct_pkt_num",
239 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240 	{"mac_tx_4096_8191_oct_pkt_num",
241 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242 	{"mac_tx_8192_9216_oct_pkt_num",
243 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244 	{"mac_tx_9217_12287_oct_pkt_num",
245 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246 	{"mac_tx_12288_16383_oct_pkt_num",
247 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248 	{"mac_tx_1519_max_good_pkt_num",
249 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250 	{"mac_tx_1519_max_bad_pkt_num",
251 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252 	{"mac_rx_total_pkt_num",
253 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254 	{"mac_rx_total_oct_num",
255 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256 	{"mac_rx_good_pkt_num",
257 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258 	{"mac_rx_bad_pkt_num",
259 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260 	{"mac_rx_good_oct_num",
261 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262 	{"mac_rx_bad_oct_num",
263 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264 	{"mac_rx_uni_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266 	{"mac_rx_multi_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268 	{"mac_rx_broad_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270 	{"mac_rx_undersize_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272 	{"mac_rx_oversize_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274 	{"mac_rx_64_oct_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276 	{"mac_rx_65_127_oct_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278 	{"mac_rx_128_255_oct_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280 	{"mac_rx_256_511_oct_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282 	{"mac_rx_512_1023_oct_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284 	{"mac_rx_1024_1518_oct_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286 	{"mac_rx_1519_2047_oct_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288 	{"mac_rx_2048_4095_oct_pkt_num",
289 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290 	{"mac_rx_4096_8191_oct_pkt_num",
291 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292 	{"mac_rx_8192_9216_oct_pkt_num",
293 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294 	{"mac_rx_9217_12287_oct_pkt_num",
295 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296 	{"mac_rx_12288_16383_oct_pkt_num",
297 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298 	{"mac_rx_1519_max_good_pkt_num",
299 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300 	{"mac_rx_1519_max_bad_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
302 
303 	{"mac_tx_fragment_pkt_num",
304 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305 	{"mac_tx_undermin_pkt_num",
306 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307 	{"mac_tx_jabber_pkt_num",
308 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309 	{"mac_tx_err_all_pkt_num",
310 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311 	{"mac_tx_from_app_good_pkt_num",
312 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313 	{"mac_tx_from_app_bad_pkt_num",
314 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315 	{"mac_rx_fragment_pkt_num",
316 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317 	{"mac_rx_undermin_pkt_num",
318 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319 	{"mac_rx_jabber_pkt_num",
320 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321 	{"mac_rx_fcs_err_pkt_num",
322 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323 	{"mac_rx_send_app_good_pkt_num",
324 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325 	{"mac_rx_send_app_bad_pkt_num",
326 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327 };
328 
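/* Management MAC table: traps LLDP frames (ethertype ETH_P_LLDP, sent to the
 * nearest-bridge multicast address 01:80:c2:00:00:0e); the
 * HCLGE_MAC_MGR_MASK_VLAN_B flag appears to make the match ignore any VLAN
 * tag.
 */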
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330 	{
331 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
333 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334 		.i_port_bitmap = 0x1,
335 	},
336 };
337 
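/* Default RSS hash key; this is the widely used 40-byte default Toeplitz key
 * shared by many NIC drivers.
 */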
338 static const u8 hclge_hash_key[] = {
339 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344 };
345 
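/* hclge_dfx_bd_offset_list and hclge_dfx_reg_opcode_list below are index
 * matched: entry i holds the BD-number offset and the query opcode of the
 * same DFX register block.
 */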
346 static const u32 hclge_dfx_bd_offset_list[] = {
347 	HCLGE_DFX_BIOS_BD_OFFSET,
348 	HCLGE_DFX_SSU_0_BD_OFFSET,
349 	HCLGE_DFX_SSU_1_BD_OFFSET,
350 	HCLGE_DFX_IGU_BD_OFFSET,
351 	HCLGE_DFX_RPU_0_BD_OFFSET,
352 	HCLGE_DFX_RPU_1_BD_OFFSET,
353 	HCLGE_DFX_NCSI_BD_OFFSET,
354 	HCLGE_DFX_RTC_BD_OFFSET,
355 	HCLGE_DFX_PPP_BD_OFFSET,
356 	HCLGE_DFX_RCB_BD_OFFSET,
357 	HCLGE_DFX_TQP_BD_OFFSET,
358 	HCLGE_DFX_SSU_2_BD_OFFSET
359 };
360 
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
363 	HCLGE_OPC_DFX_SSU_REG_0,
364 	HCLGE_OPC_DFX_SSU_REG_1,
365 	HCLGE_OPC_DFX_IGU_EGU_REG,
366 	HCLGE_OPC_DFX_RPU_REG_0,
367 	HCLGE_OPC_DFX_RPU_REG_1,
368 	HCLGE_OPC_DFX_NCSI_REG,
369 	HCLGE_OPC_DFX_RTC_REG,
370 	HCLGE_OPC_DFX_PPP_REG,
371 	HCLGE_OPC_DFX_RCB_REG,
372 	HCLGE_OPC_DFX_TQP_REG,
373 	HCLGE_OPC_DFX_SSU_REG_2
374 };
375 
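/* meta data fields of the flow director key and their widths in bits */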
376 static const struct key_info meta_data_key_info[] = {
377 	{ PACKET_TYPE_ID, 6},
378 	{ IP_FRAGEMENT, 1},
379 	{ ROCE_TYPE, 1},
380 	{ NEXT_KEY, 5},
381 	{ VLAN_NUMBER, 2},
382 	{ SRC_VPORT, 12},
383 	{ DST_VPORT, 12},
384 	{ TUNNEL_PACKET, 1},
385 };
386 
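/* flow director tuple fields: width in bits, key building option, and the
 * offsets of the tuple value and mask inside struct hclge_fd_rule (-1 when
 * the field is not taken from the rule directly)
 */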
387 static const struct key_info tuple_key_info[] = {
388 	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
389 	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
390 	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
391 	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
392 	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
393 	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
394 	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
395 	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
396 	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
397 	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
398 	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
399 	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
400 	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
401 	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
402 	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
403 	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
404 	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
405 	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
406 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
407 	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
408 	  offsetof(struct hclge_fd_rule, tuples.src_mac),
409 	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
410 	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
411 	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
412 	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
413 	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
414 	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
415 	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
416 	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
417 	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
418 	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
419 	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
420 	{ INNER_IP_TOS, 8, KEY_OPT_U8,
421 	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
422 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
423 	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
424 	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
425 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
426 	{ INNER_SRC_IP, 32, KEY_OPT_IP,
427 	  offsetof(struct hclge_fd_rule, tuples.src_ip),
428 	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
429 	{ INNER_DST_IP, 32, KEY_OPT_IP,
430 	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
431 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
432 	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
433 	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
434 	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
435 	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
436 	  offsetof(struct hclge_fd_rule, tuples.src_port),
437 	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
438 	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
439 	  offsetof(struct hclge_fd_rule, tuples.dst_port),
440 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
441 	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
442 	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
443 	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
444 };
445 
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
447 {
448 #define HCLGE_MAC_CMD_NUM 21
449 
450 	u64 *data = (u64 *)(&hdev->mac_stats);
451 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
452 	__le64 *desc_data;
453 	int i, k, n;
454 	int ret;
455 
456 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
457 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
458 	if (ret) {
459 		dev_err(&hdev->pdev->dev,
460 			"Get MAC pkt stats fail, status = %d.\n", ret);
461 
462 		return ret;
463 	}
464 
465 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
466 		/* for special opcode 0032, only the first desc has the head */
467 		if (unlikely(i == 0)) {
468 			desc_data = (__le64 *)(&desc[i].data[0]);
469 			n = HCLGE_RD_FIRST_STATS_NUM;
470 		} else {
471 			desc_data = (__le64 *)(&desc[i]);
472 			n = HCLGE_RD_OTHER_STATS_NUM;
473 		}
474 
475 		for (k = 0; k < n; k++) {
476 			*data += le64_to_cpu(*desc_data);
477 			data++;
478 			desc_data++;
479 		}
480 	}
481 
482 	return 0;
483 }
484 
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
486 {
487 	u64 *data = (u64 *)(&hdev->mac_stats);
488 	struct hclge_desc *desc;
489 	__le64 *desc_data;
490 	u16 i, k, n;
491 	int ret;
492 
	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
496 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
497 	if (!desc)
498 		return -ENOMEM;
499 
500 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
501 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
502 	if (ret) {
503 		kfree(desc);
504 		return ret;
505 	}
506 
507 	for (i = 0; i < desc_num; i++) {
508 		/* for special opcode 0034, only the first desc has the head */
509 		if (i == 0) {
510 			desc_data = (__le64 *)(&desc[i].data[0]);
511 			n = HCLGE_RD_FIRST_STATS_NUM;
512 		} else {
513 			desc_data = (__le64 *)(&desc[i]);
514 			n = HCLGE_RD_OTHER_STATS_NUM;
515 		}
516 
517 		for (k = 0; k < n; k++) {
518 			*data += le64_to_cpu(*desc_data);
519 			data++;
520 			desc_data++;
521 		}
522 	}
523 
524 	kfree(desc);
525 
526 	return 0;
527 }
528 
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
530 {
531 	struct hclge_desc desc;
532 	__le32 *desc_data;
533 	u32 reg_num;
534 	int ret;
535 
536 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
537 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
538 	if (ret)
539 		return ret;
540 
541 	desc_data = (__le32 *)(&desc.data[0]);
542 	reg_num = le32_to_cpu(*desc_data);
543 
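	/* The first descriptor returns 3 statistics registers (the rest of
	 * its space holds the command header), each following descriptor
	 * returns 4, so round the remainder up:
	 * desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4)
	 */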
544 	*desc_num = 1 + ((reg_num - 3) >> 2) +
545 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
546 
547 	return 0;
548 }
549 
static int hclge_mac_update_stats(struct hclge_dev *hdev)
551 {
552 	u32 desc_num;
553 	int ret;
554 
555 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
556 	/* The firmware supports the new statistics acquisition method */
557 	if (!ret)
558 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
559 	else if (ret == -EOPNOTSUPP)
560 		ret = hclge_mac_update_stats_defective(hdev);
561 	else
562 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
563 
564 	return ret;
565 }
566 
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
568 {
569 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
570 	struct hclge_vport *vport = hclge_get_vport(handle);
571 	struct hclge_dev *hdev = vport->back;
572 	struct hnae3_queue *queue;
573 	struct hclge_desc desc[1];
574 	struct hclge_tqp *tqp;
575 	int ret, i;
576 
577 	for (i = 0; i < kinfo->num_tqps; i++) {
578 		queue = handle->kinfo.tqp[i];
579 		tqp = container_of(queue, struct hclge_tqp, q);
580 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
581 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
582 					   true);
583 
584 		desc[0].data[0] = cpu_to_le32(tqp->index);
585 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
586 		if (ret) {
587 			dev_err(&hdev->pdev->dev,
588 				"Query tqp stat fail, status = %d,queue = %d\n",
589 				ret, i);
590 			return ret;
591 		}
592 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
593 			le32_to_cpu(desc[0].data[1]);
594 	}
595 
596 	for (i = 0; i < kinfo->num_tqps; i++) {
597 		queue = handle->kinfo.tqp[i];
598 		tqp = container_of(queue, struct hclge_tqp, q);
599 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
600 		hclge_cmd_setup_basic_desc(&desc[0],
601 					   HCLGE_OPC_QUERY_TX_STATS,
602 					   true);
603 
604 		desc[0].data[0] = cpu_to_le32(tqp->index);
605 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
606 		if (ret) {
607 			dev_err(&hdev->pdev->dev,
608 				"Query tqp stat fail, status = %d,queue = %d\n",
609 				ret, i);
610 			return ret;
611 		}
612 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
613 			le32_to_cpu(desc[0].data[1]);
614 	}
615 
616 	return 0;
617 }
618 
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
620 {
621 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622 	struct hclge_tqp *tqp;
623 	u64 *buff = data;
624 	int i;
625 
626 	for (i = 0; i < kinfo->num_tqps; i++) {
627 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
628 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
629 	}
630 
631 	for (i = 0; i < kinfo->num_tqps; i++) {
632 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
633 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
634 	}
635 
636 	return buff;
637 }
638 
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
640 {
641 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
642 
	/* each tqp has one TX queue and one RX queue */
	return kinfo->num_tqps * 2;
645 }
646 
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
648 {
649 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
650 	u8 *buff = data;
651 	int i;
652 
653 	for (i = 0; i < kinfo->num_tqps; i++) {
654 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
655 			struct hclge_tqp, q);
656 		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
657 			 tqp->index);
658 		buff = buff + ETH_GSTRING_LEN;
659 	}
660 
661 	for (i = 0; i < kinfo->num_tqps; i++) {
662 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
663 			struct hclge_tqp, q);
664 		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
665 			 tqp->index);
666 		buff = buff + ETH_GSTRING_LEN;
667 	}
668 
669 	return buff;
670 }
671 
static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
675 {
676 	u64 *buf = data;
677 	u32 i;
678 
679 	for (i = 0; i < size; i++)
680 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
681 
682 	return buf + size;
683 }
684 
static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
688 {
689 	char *buff = (char *)data;
690 	u32 i;
691 
692 	if (stringset != ETH_SS_STATS)
693 		return buff;
694 
695 	for (i = 0; i < size; i++) {
696 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
697 		buff = buff + ETH_GSTRING_LEN;
698 	}
699 
700 	return (u8 *)buff;
701 }
702 
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
704 {
705 	struct hnae3_handle *handle;
706 	int status;
707 
708 	handle = &hdev->vport[0].nic;
709 	if (handle->client) {
710 		status = hclge_tqps_update_stats(handle);
711 		if (status) {
712 			dev_err(&hdev->pdev->dev,
713 				"Update TQPS stats fail, status = %d.\n",
714 				status);
715 		}
716 	}
717 
718 	status = hclge_mac_update_stats(hdev);
719 	if (status)
720 		dev_err(&hdev->pdev->dev,
721 			"Update MAC stats fail, status = %d.\n", status);
722 }
723 
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
726 {
727 	struct hclge_vport *vport = hclge_get_vport(handle);
728 	struct hclge_dev *hdev = vport->back;
729 	int status;
730 
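	/* avoid concurrent statistics updates: if an update is already in
	 * progress, return and keep the previously collected values
	 */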
731 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
732 		return;
733 
734 	status = hclge_mac_update_stats(hdev);
735 	if (status)
736 		dev_err(&hdev->pdev->dev,
737 			"Update MAC stats fail, status = %d.\n",
738 			status);
739 
740 	status = hclge_tqps_update_stats(handle);
741 	if (status)
742 		dev_err(&hdev->pdev->dev,
743 			"Update TQPS stats fail, status = %d.\n",
744 			status);
745 
746 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
747 }
748 
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
750 {
751 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
752 		HNAE3_SUPPORT_PHY_LOOPBACK |\
753 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
754 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
755 
756 	struct hclge_vport *vport = hclge_get_vport(handle);
757 	struct hclge_dev *hdev = vport->back;
758 	int count = 0;
759 
	/* Loopback test support rules:
	 * mac: only GE mode is supported
	 * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
765 	if (stringset == ETH_SS_TEST) {
766 		/* clear loopback bit flags at first */
767 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
768 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
769 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
770 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
771 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
772 			count += 1;
773 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
774 		}
775 
776 		count += 2;
777 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
778 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
779 
780 		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
781 		     hdev->hw.mac.phydev->drv->set_loopback) ||
782 		    hnae3_dev_phy_imp_supported(hdev)) {
783 			count += 1;
784 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
785 		}
786 	} else if (stringset == ETH_SS_STATS) {
787 		count = ARRAY_SIZE(g_mac_stats_string) +
788 			hclge_tqps_get_sset_count(handle, stringset);
789 	}
790 
791 	return count;
792 }
793 
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
796 {
797 	u8 *p = (char *)data;
798 	int size;
799 
800 	if (stringset == ETH_SS_STATS) {
801 		size = ARRAY_SIZE(g_mac_stats_string);
802 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
803 					   size, p);
804 		p = hclge_tqps_get_strings(handle, p);
805 	} else if (stringset == ETH_SS_TEST) {
806 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
807 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
808 			       ETH_GSTRING_LEN);
809 			p += ETH_GSTRING_LEN;
810 		}
811 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
812 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
813 			       ETH_GSTRING_LEN);
814 			p += ETH_GSTRING_LEN;
815 		}
816 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
817 			memcpy(p,
818 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
819 			       ETH_GSTRING_LEN);
820 			p += ETH_GSTRING_LEN;
821 		}
822 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
823 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
824 			       ETH_GSTRING_LEN);
825 			p += ETH_GSTRING_LEN;
826 		}
827 	}
828 }
829 
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
831 {
832 	struct hclge_vport *vport = hclge_get_vport(handle);
833 	struct hclge_dev *hdev = vport->back;
834 	u64 *p;
835 
836 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
837 				 ARRAY_SIZE(g_mac_stats_string), data);
838 	p = hclge_tqps_get_stats(handle, p);
839 }
840 
static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
843 {
844 	struct hclge_vport *vport = hclge_get_vport(handle);
845 	struct hclge_dev *hdev = vport->back;
846 
847 	hclge_update_stats(handle, NULL);
848 
849 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
850 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
851 }
852 
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
855 {
856 #define HCLGE_MAC_ID_MASK	0xF
857 
858 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
859 		return -EINVAL;
860 
	/* Record whether this PF is the main PF */
862 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
863 		hdev->flag |= HCLGE_FLAG_MAIN;
864 	else
865 		hdev->flag &= ~HCLGE_FLAG_MAIN;
866 
867 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
868 	return 0;
869 }
870 
static int hclge_query_function_status(struct hclge_dev *hdev)
872 {
873 #define HCLGE_QUERY_MAX_CNT	5
874 
875 	struct hclge_func_status_cmd *req;
876 	struct hclge_desc desc;
877 	int timeout = 0;
878 	int ret;
879 
880 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
881 	req = (struct hclge_func_status_cmd *)desc.data;
882 
883 	do {
884 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
885 		if (ret) {
886 			dev_err(&hdev->pdev->dev,
887 				"query function status failed %d.\n", ret);
888 			return ret;
889 		}
890 
891 		/* Check pf reset is done */
892 		if (req->pf_state)
893 			break;
894 		usleep_range(1000, 2000);
895 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
896 
897 	return hclge_parse_func_status(hdev, req);
898 }
899 
static int hclge_query_pf_resource(struct hclge_dev *hdev)
901 {
902 	struct hclge_pf_res_cmd *req;
903 	struct hclge_desc desc;
904 	int ret;
905 
906 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
907 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
908 	if (ret) {
909 		dev_err(&hdev->pdev->dev,
910 			"query pf resource failed %d.\n", ret);
911 		return ret;
912 	}
913 
914 	req = (struct hclge_pf_res_cmd *)desc.data;
915 	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
916 			 le16_to_cpu(req->ext_tqp_num);
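	/* firmware reports buffer sizes in hardware units; shifting by
	 * HCLGE_BUF_UNIT_S converts them to bytes, and the tx/dv buffer
	 * sizes are then rounded up to HCLGE_BUF_SIZE_UNIT
	 */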
917 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
918 
919 	if (req->tx_buf_size)
920 		hdev->tx_buf_size =
921 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
922 	else
923 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
924 
925 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
926 
927 	if (req->dv_buf_size)
928 		hdev->dv_buf_size =
929 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
930 	else
931 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
932 
933 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
934 
935 	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
936 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
937 		dev_err(&hdev->pdev->dev,
938 			"only %u msi resources available, not enough for pf(min:2).\n",
939 			hdev->num_nic_msi);
940 		return -EINVAL;
941 	}
942 
943 	if (hnae3_dev_roce_supported(hdev)) {
944 		hdev->num_roce_msi =
945 			le16_to_cpu(req->pf_intr_vector_number_roce);
946 
		/* The PF has both NIC and RoCE vectors; the NIC vectors are
		 * arranged before the RoCE vectors.
		 */
950 		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
951 	} else {
952 		hdev->num_msi = hdev->num_nic_msi;
953 	}
954 
955 	return 0;
956 }
957 
static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
959 {
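	/* speed_cmd is the firmware speed code: 0..5 map to 1G, 10G, 25G,
	 * 40G, 50G and 100G, 6 and 7 to 10M and 100M, and 8 to 200G
	 */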
960 	switch (speed_cmd) {
961 	case 6:
962 		*speed = HCLGE_MAC_SPEED_10M;
963 		break;
964 	case 7:
965 		*speed = HCLGE_MAC_SPEED_100M;
966 		break;
967 	case 0:
968 		*speed = HCLGE_MAC_SPEED_1G;
969 		break;
970 	case 1:
971 		*speed = HCLGE_MAC_SPEED_10G;
972 		break;
973 	case 2:
974 		*speed = HCLGE_MAC_SPEED_25G;
975 		break;
976 	case 3:
977 		*speed = HCLGE_MAC_SPEED_40G;
978 		break;
979 	case 4:
980 		*speed = HCLGE_MAC_SPEED_50G;
981 		break;
982 	case 5:
983 		*speed = HCLGE_MAC_SPEED_100G;
984 		break;
985 	case 8:
986 		*speed = HCLGE_MAC_SPEED_200G;
987 		break;
988 	default:
989 		return -EINVAL;
990 	}
991 
992 	return 0;
993 }
994 
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
996 {
997 	struct hclge_vport *vport = hclge_get_vport(handle);
998 	struct hclge_dev *hdev = vport->back;
999 	u32 speed_ability = hdev->hw.mac.speed_ability;
1000 	u32 speed_bit = 0;
1001 
1002 	switch (speed) {
1003 	case HCLGE_MAC_SPEED_10M:
1004 		speed_bit = HCLGE_SUPPORT_10M_BIT;
1005 		break;
1006 	case HCLGE_MAC_SPEED_100M:
1007 		speed_bit = HCLGE_SUPPORT_100M_BIT;
1008 		break;
1009 	case HCLGE_MAC_SPEED_1G:
1010 		speed_bit = HCLGE_SUPPORT_1G_BIT;
1011 		break;
1012 	case HCLGE_MAC_SPEED_10G:
1013 		speed_bit = HCLGE_SUPPORT_10G_BIT;
1014 		break;
1015 	case HCLGE_MAC_SPEED_25G:
1016 		speed_bit = HCLGE_SUPPORT_25G_BIT;
1017 		break;
1018 	case HCLGE_MAC_SPEED_40G:
1019 		speed_bit = HCLGE_SUPPORT_40G_BIT;
1020 		break;
1021 	case HCLGE_MAC_SPEED_50G:
1022 		speed_bit = HCLGE_SUPPORT_50G_BIT;
1023 		break;
1024 	case HCLGE_MAC_SPEED_100G:
1025 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1026 		break;
1027 	case HCLGE_MAC_SPEED_200G:
1028 		speed_bit = HCLGE_SUPPORT_200G_BIT;
1029 		break;
1030 	default:
1031 		return -EINVAL;
1032 	}
1033 
1034 	if (speed_bit & speed_ability)
1035 		return 0;
1036 
1037 	return -EINVAL;
1038 }
1039 
static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1041 {
1042 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1043 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1044 				 mac->supported);
1045 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1046 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1047 				 mac->supported);
1048 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1050 				 mac->supported);
1051 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1052 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1053 				 mac->supported);
1054 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1055 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1056 				 mac->supported);
1057 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1058 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1059 				 mac->supported);
1060 }
1061 
static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1063 {
1064 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1065 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1066 				 mac->supported);
1067 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1068 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1069 				 mac->supported);
1070 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1072 				 mac->supported);
1073 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1074 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1075 				 mac->supported);
1076 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1078 				 mac->supported);
1079 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1080 		linkmode_set_bit(
1081 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1082 			mac->supported);
1083 }
1084 
static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1086 {
1087 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1089 				 mac->supported);
1090 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1092 				 mac->supported);
1093 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1095 				 mac->supported);
1096 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1098 				 mac->supported);
1099 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1101 				 mac->supported);
1102 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1104 				 mac->supported);
1105 }
1106 
static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1108 {
1109 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1110 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1111 				 mac->supported);
1112 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1113 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1114 				 mac->supported);
1115 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1116 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1117 				 mac->supported);
1118 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1119 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1120 				 mac->supported);
1121 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1122 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1123 				 mac->supported);
1124 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1125 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1126 				 mac->supported);
1127 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1128 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1129 				 mac->supported);
1130 }
1131 
static void hclge_convert_setting_fec(struct hclge_mac *mac)
1133 {
1134 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1135 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1136 
1137 	switch (mac->speed) {
1138 	case HCLGE_MAC_SPEED_10G:
1139 	case HCLGE_MAC_SPEED_40G:
1140 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1141 				 mac->supported);
1142 		mac->fec_ability =
1143 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1144 		break;
1145 	case HCLGE_MAC_SPEED_25G:
1146 	case HCLGE_MAC_SPEED_50G:
1147 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1148 				 mac->supported);
1149 		mac->fec_ability =
1150 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1151 			BIT(HNAE3_FEC_AUTO);
1152 		break;
1153 	case HCLGE_MAC_SPEED_100G:
1154 	case HCLGE_MAC_SPEED_200G:
1155 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1156 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1157 		break;
1158 	default:
1159 		mac->fec_ability = 0;
1160 		break;
1161 	}
1162 }
1163 
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
1166 {
1167 	struct hclge_mac *mac = &hdev->hw.mac;
1168 
1169 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1171 				 mac->supported);
1172 
1173 	hclge_convert_setting_sr(mac, speed_ability);
1174 	hclge_convert_setting_lr(mac, speed_ability);
1175 	hclge_convert_setting_cr(mac, speed_ability);
1176 	if (hnae3_dev_fec_supported(hdev))
1177 		hclge_convert_setting_fec(mac);
1178 
1179 	if (hnae3_dev_pause_supported(hdev))
1180 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1181 
1182 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1183 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1184 }
1185 
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
1188 {
1189 	struct hclge_mac *mac = &hdev->hw.mac;
1190 
1191 	hclge_convert_setting_kr(mac, speed_ability);
1192 	if (hnae3_dev_fec_supported(hdev))
1193 		hclge_convert_setting_fec(mac);
1194 
1195 	if (hnae3_dev_pause_supported(hdev))
1196 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1197 
1198 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1199 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1200 }
1201 
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
1204 {
1205 	unsigned long *supported = hdev->hw.mac.supported;
1206 
1207 	/* default to support all speed for GE port */
1208 	if (!speed_ability)
1209 		speed_ability = HCLGE_SUPPORT_GE;
1210 
1211 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1212 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1213 				 supported);
1214 
1215 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1216 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1217 				 supported);
1218 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1219 				 supported);
1220 	}
1221 
1222 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1223 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1224 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1225 	}
1226 
1227 	if (hnae3_dev_pause_supported(hdev)) {
1228 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1229 		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1230 	}
1231 
1232 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1233 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1234 }
1235 
static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1237 {
1238 	u8 media_type = hdev->hw.mac.media_type;
1239 
1240 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1241 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1242 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1243 		hclge_parse_copper_link_mode(hdev, speed_ability);
1244 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1245 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1246 }
1247 
static u32 hclge_get_max_speed(u16 speed_ability)
1249 {
1250 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1251 		return HCLGE_MAC_SPEED_200G;
1252 
1253 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1254 		return HCLGE_MAC_SPEED_100G;
1255 
1256 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1257 		return HCLGE_MAC_SPEED_50G;
1258 
1259 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1260 		return HCLGE_MAC_SPEED_40G;
1261 
1262 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1263 		return HCLGE_MAC_SPEED_25G;
1264 
1265 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1266 		return HCLGE_MAC_SPEED_10G;
1267 
1268 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1269 		return HCLGE_MAC_SPEED_1G;
1270 
1271 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1272 		return HCLGE_MAC_SPEED_100M;
1273 
1274 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1275 		return HCLGE_MAC_SPEED_10M;
1276 
1277 	return HCLGE_MAC_SPEED_1G;
1278 }
1279 
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1281 {
1282 #define SPEED_ABILITY_EXT_SHIFT			8
1283 
1284 	struct hclge_cfg_param_cmd *req;
1285 	u64 mac_addr_tmp_high;
1286 	u16 speed_ability_ext;
1287 	u64 mac_addr_tmp;
1288 	unsigned int i;
1289 
1290 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1291 
1292 	/* get the configuration */
1293 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1294 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1295 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1296 					    HCLGE_CFG_TQP_DESC_N_M,
1297 					    HCLGE_CFG_TQP_DESC_N_S);
1298 
1299 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1300 					HCLGE_CFG_PHY_ADDR_M,
1301 					HCLGE_CFG_PHY_ADDR_S);
1302 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1303 					  HCLGE_CFG_MEDIA_TP_M,
1304 					  HCLGE_CFG_MEDIA_TP_S);
1305 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1306 					  HCLGE_CFG_RX_BUF_LEN_M,
1307 					  HCLGE_CFG_RX_BUF_LEN_S);
1308 	/* get mac_address */
1309 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1310 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1311 					    HCLGE_CFG_MAC_ADDR_H_M,
1312 					    HCLGE_CFG_MAC_ADDR_H_S);
1313 
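	/* param[2] carries the low 32 bits of the MAC address and param[3]
	 * the high 16 bits; merge the high part into bits 47..32
	 */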
1314 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1315 
1316 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1317 					     HCLGE_CFG_DEFAULT_SPEED_M,
1318 					     HCLGE_CFG_DEFAULT_SPEED_S);
1319 	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1320 					       HCLGE_CFG_RSS_SIZE_M,
1321 					       HCLGE_CFG_RSS_SIZE_S);
1322 
1323 	for (i = 0; i < ETH_ALEN; i++)
1324 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1325 
1326 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1327 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1328 
1329 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1330 					     HCLGE_CFG_SPEED_ABILITY_M,
1331 					     HCLGE_CFG_SPEED_ABILITY_S);
1332 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1333 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1334 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
1335 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1336 
1337 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1338 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1339 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1340 	if (!cfg->umv_space)
1341 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1342 
1343 	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1344 					       HCLGE_CFG_PF_RSS_SIZE_M,
1345 					       HCLGE_CFG_PF_RSS_SIZE_S);
1346 
	/* HCLGE_CFG_PF_RSS_SIZE_M holds the exponent of the PF max rss size
	 * (a power of 2) rather than the size itself, which is more flexible
	 * for future changes and expansions. A value of 0 means the field is
	 * not used; in that case the PF falls back to the VF max rss size
	 * field (HCLGE_CFG_RSS_SIZE_S), so PF and VF share the same maximum.
	 */
1354 	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1355 			       1U << cfg->pf_rss_size_max :
1356 			       cfg->vf_rss_size_max;
1357 }
1358 
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure used to return the queried configuration
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1364 {
1365 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1366 	struct hclge_cfg_param_cmd *req;
1367 	unsigned int i;
1368 	int ret;
1369 
1370 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1371 		u32 offset = 0;
1372 
1373 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1374 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1375 					   true);
1376 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1377 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* The read length is sent to hardware in units of 4 bytes */
1379 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1380 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1381 		req->offset = cpu_to_le32(offset);
1382 	}
1383 
1384 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1385 	if (ret) {
1386 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1387 		return ret;
1388 	}
1389 
1390 	hclge_parse_cfg(hcfg, desc);
1391 
1392 	return 0;
1393 }
1394 
static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1396 {
1397 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1398 
1399 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1400 
1401 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1402 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1403 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1404 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1405 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1406 	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1407 	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1408 }
1409 
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
1412 {
1413 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1414 	struct hclge_dev_specs_0_cmd *req0;
1415 	struct hclge_dev_specs_1_cmd *req1;
1416 
1417 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1418 	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1419 
1420 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1421 	ae_dev->dev_specs.rss_ind_tbl_size =
1422 		le16_to_cpu(req0->rss_ind_tbl_size);
1423 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1424 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1425 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1426 	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1427 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1428 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1429 }
1430 
static void hclge_check_dev_specs(struct hclge_dev *hdev)
1432 {
1433 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1434 
1435 	if (!dev_specs->max_non_tso_bd_num)
1436 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1437 	if (!dev_specs->rss_ind_tbl_size)
1438 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1439 	if (!dev_specs->rss_key_size)
1440 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1441 	if (!dev_specs->max_tm_rate)
1442 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1443 	if (!dev_specs->max_qset_num)
1444 		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1445 	if (!dev_specs->max_int_gl)
1446 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1447 	if (!dev_specs->max_frm_size)
1448 		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1449 }
1450 
static int hclge_query_dev_specs(struct hclge_dev *hdev)
1452 {
1453 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1454 	int ret;
1455 	int i;
1456 
1457 	/* set default specifications as devices lower than version V3 do not
1458 	 * support querying specifications from firmware.
1459 	 */
1460 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1461 		hclge_set_default_dev_specs(hdev);
1462 		return 0;
1463 	}
1464 
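	/* chain the query descriptors: every descriptor except the last one
	 * carries the NEXT flag
	 */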
1465 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1466 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1467 					   true);
1468 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1469 	}
1470 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1471 
1472 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1473 	if (ret)
1474 		return ret;
1475 
1476 	hclge_parse_dev_specs(hdev, desc);
1477 	hclge_check_dev_specs(hdev);
1478 
1479 	return 0;
1480 }
1481 
static int hclge_get_cap(struct hclge_dev *hdev)
1483 {
1484 	int ret;
1485 
1486 	ret = hclge_query_function_status(hdev);
1487 	if (ret) {
1488 		dev_err(&hdev->pdev->dev,
1489 			"query function status error %d.\n", ret);
1490 		return ret;
1491 	}
1492 
1493 	/* get pf resource */
1494 	return hclge_query_pf_resource(hdev);
1495 }
1496 
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1498 {
1499 #define HCLGE_MIN_TX_DESC	64
1500 #define HCLGE_MIN_RX_DESC	64
1501 
1502 	if (!is_kdump_kernel())
1503 		return;
1504 
1505 	dev_info(&hdev->pdev->dev,
1506 		 "Running kdump kernel. Using minimal resources\n");
1507 
	/* the minimal number of queue pairs equals the number of vports */
1509 	hdev->num_tqps = hdev->num_req_vfs + 1;
1510 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1511 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1512 }
1513 
static int hclge_configure(struct hclge_dev *hdev)
1515 {
1516 	struct hclge_cfg cfg;
1517 	unsigned int i;
1518 	int ret;
1519 
1520 	ret = hclge_get_cfg(hdev, &cfg);
1521 	if (ret)
1522 		return ret;
1523 
1524 	hdev->base_tqp_pid = 0;
1525 	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1526 	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1527 	hdev->rx_buf_len = cfg.rx_buf_len;
1528 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1529 	hdev->hw.mac.media_type = cfg.media_type;
1530 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1531 	hdev->num_tx_desc = cfg.tqp_desc_num;
1532 	hdev->num_rx_desc = cfg.tqp_desc_num;
1533 	hdev->tm_info.num_pg = 1;
1534 	hdev->tc_max = cfg.tc_num;
1535 	hdev->tm_info.hw_pfc_map = 0;
1536 	hdev->wanted_umv_size = cfg.umv_space;
1537 
1538 	if (hnae3_dev_fd_supported(hdev)) {
1539 		hdev->fd_en = true;
1540 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1541 	}
1542 
1543 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1544 	if (ret) {
1545 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1546 			cfg.default_speed, ret);
1547 		return ret;
1548 	}
1549 
1550 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1551 
1552 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1553 
1554 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1555 	    (hdev->tc_max < 1)) {
1556 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1557 			 hdev->tc_max);
1558 		hdev->tc_max = 1;
1559 	}
1560 
1561 	/* Dev does not support DCB */
1562 	if (!hnae3_dev_dcb_supported(hdev)) {
1563 		hdev->tc_max = 1;
1564 		hdev->pfc_max = 0;
1565 	} else {
1566 		hdev->pfc_max = hdev->tc_max;
1567 	}
1568 
1569 	hdev->tm_info.num_tc = 1;
1570 
1571 	/* Non-contiguous TCs are currently not supported */
1572 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1573 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1574 
1575 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1576 
1577 	hclge_init_kdump_kernel_config(hdev);
1578 
1579 	/* Set the initial affinity based on the PCI function number */
1580 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1581 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1582 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1583 			&hdev->affinity_mask);
1584 
1585 	return ret;
1586 }
1587 
1588 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1589 			    u16 tso_mss_max)
1590 {
1591 	struct hclge_cfg_tso_status_cmd *req;
1592 	struct hclge_desc desc;
1593 
1594 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1595 
1596 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1597 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1598 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1599 
1600 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1601 }
1602 
1603 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1604 {
1605 	struct hclge_cfg_gro_status_cmd *req;
1606 	struct hclge_desc desc;
1607 	int ret;
1608 
1609 	if (!hnae3_dev_gro_supported(hdev))
1610 		return 0;
1611 
1612 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1613 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1614 
1615 	req->gro_en = en ? 1 : 0;
1616 
1617 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1618 	if (ret)
1619 		dev_err(&hdev->pdev->dev,
1620 			"GRO hardware config cmd failed, ret = %d\n", ret);
1621 
1622 	return ret;
1623 }
1624 
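/* Allocate one hclge_tqp per hardware queue pair and record each queue's
 * register base. Queues at or above HCLGE_TQP_MAX_SIZE_DEV_V2 are accessed
 * through an extended register region, hence the extra offset below.
 */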
1625 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1626 {
1627 	struct hclge_tqp *tqp;
1628 	int i;
1629 
1630 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1631 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1632 	if (!hdev->htqp)
1633 		return -ENOMEM;
1634 
1635 	tqp = hdev->htqp;
1636 
1637 	for (i = 0; i < hdev->num_tqps; i++) {
1638 		tqp->dev = &hdev->pdev->dev;
1639 		tqp->index = i;
1640 
1641 		tqp->q.ae_algo = &ae_algo;
1642 		tqp->q.buf_size = hdev->rx_buf_len;
1643 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1644 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1645 
1646 		/* need an extended offset to configure queues >=
1647 		 * HCLGE_TQP_MAX_SIZE_DEV_V2
1648 		 */
1649 		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1650 			tqp->q.io_base = hdev->hw.io_base +
1651 					 HCLGE_TQP_REG_OFFSET +
1652 					 i * HCLGE_TQP_REG_SIZE;
1653 		else
1654 			tqp->q.io_base = hdev->hw.io_base +
1655 					 HCLGE_TQP_REG_OFFSET +
1656 					 HCLGE_TQP_EXT_REG_OFFSET +
1657 					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1658 					 HCLGE_TQP_REG_SIZE;
1659 
1660 		tqp++;
1661 	}
1662 
1663 	return 0;
1664 }
1665 
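/* Bind a physical queue (tqp_pid) to a function's virtual queue id
 * (tqp_vid). The TYPE bit in tqp_flag marks the mapping as a VF mapping;
 * the PF leaves it cleared.
 */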
1666 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1667 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1668 {
1669 	struct hclge_tqp_map_cmd *req;
1670 	struct hclge_desc desc;
1671 	int ret;
1672 
1673 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1674 
1675 	req = (struct hclge_tqp_map_cmd *)desc.data;
1676 	req->tqp_id = cpu_to_le16(tqp_pid);
1677 	req->tqp_vf = func_id;
1678 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1679 	if (!is_pf)
1680 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1681 	req->tqp_vid = cpu_to_le16(tqp_vid);
1682 
1683 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1684 	if (ret)
1685 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1686 
1687 	return ret;
1688 }
1689 
1690 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1691 {
1692 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1693 	struct hclge_dev *hdev = vport->back;
1694 	int i, alloced;
1695 
1696 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1697 	     alloced < num_tqps; i++) {
1698 		if (!hdev->htqp[i].alloced) {
1699 			hdev->htqp[i].q.handle = &vport->nic;
1700 			hdev->htqp[i].q.tqp_index = alloced;
1701 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1702 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1703 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1704 			hdev->htqp[i].alloced = true;
1705 			alloced++;
1706 		}
1707 	}
1708 	vport->alloc_tqps = alloced;
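	/* rss_size is bounded by the PF limit and by the number of queues
	 * available per TC
	 */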
1709 	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1710 				vport->alloc_tqps / hdev->tm_info.num_tc);
1711 
1712 	/* ensure a one-to-one mapping between irq and queue by default */
1713 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1714 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1715 
1716 	return 0;
1717 }
1718 
1719 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1720 			    u16 num_tx_desc, u16 num_rx_desc)
1721 
1722 {
1723 	struct hnae3_handle *nic = &vport->nic;
1724 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1725 	struct hclge_dev *hdev = vport->back;
1726 	int ret;
1727 
1728 	kinfo->num_tx_desc = num_tx_desc;
1729 	kinfo->num_rx_desc = num_rx_desc;
1730 
1731 	kinfo->rx_buf_len = hdev->rx_buf_len;
1732 
1733 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1734 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1735 	if (!kinfo->tqp)
1736 		return -ENOMEM;
1737 
1738 	ret = hclge_assign_tqp(vport, num_tqps);
1739 	if (ret)
1740 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1741 
1742 	return ret;
1743 }
1744 
1745 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1746 				  struct hclge_vport *vport)
1747 {
1748 	struct hnae3_handle *nic = &vport->nic;
1749 	struct hnae3_knic_private_info *kinfo;
1750 	u16 i;
1751 
1752 	kinfo = &nic->kinfo;
1753 	for (i = 0; i < vport->alloc_tqps; i++) {
1754 		struct hclge_tqp *q =
1755 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1756 		bool is_pf;
1757 		int ret;
1758 
1759 		is_pf = !(vport->vport_id);
1760 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1761 					     i, is_pf);
1762 		if (ret)
1763 			return ret;
1764 	}
1765 
1766 	return 0;
1767 }
1768 
1769 static int hclge_map_tqp(struct hclge_dev *hdev)
1770 {
1771 	struct hclge_vport *vport = hdev->vport;
1772 	u16 i, num_vport;
1773 
1774 	num_vport = hdev->num_req_vfs + 1;
1775 	for (i = 0; i < num_vport; i++)	{
1776 		int ret;
1777 
1778 		ret = hclge_map_tqp_to_vport(hdev, vport);
1779 		if (ret)
1780 			return ret;
1781 
1782 		vport++;
1783 	}
1784 
1785 	return 0;
1786 }
1787 
1788 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1789 {
1790 	struct hnae3_handle *nic = &vport->nic;
1791 	struct hclge_dev *hdev = vport->back;
1792 	int ret;
1793 
1794 	nic->pdev = hdev->pdev;
1795 	nic->ae_algo = &ae_algo;
1796 	nic->numa_node_mask = hdev->numa_node_mask;
1797 
1798 	ret = hclge_knic_setup(vport, num_tqps,
1799 			       hdev->num_tx_desc, hdev->num_rx_desc);
1800 	if (ret)
1801 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1802 
1803 	return ret;
1804 }
1805 
1806 static int hclge_alloc_vport(struct hclge_dev *hdev)
1807 {
1808 	struct pci_dev *pdev = hdev->pdev;
1809 	struct hclge_vport *vport;
1810 	u32 tqp_main_vport;
1811 	u32 tqp_per_vport;
1812 	int num_vport, i;
1813 	int ret;
1814 
1815 	/* We need to alloc a vport for main NIC of PF */
1816 	num_vport = hdev->num_req_vfs + 1;
1817 
1818 	if (hdev->num_tqps < num_vport) {
1819 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1820 			hdev->num_tqps, num_vport);
1821 		return -EINVAL;
1822 	}
1823 
1824 	/* Alloc the same number of TQPs for every vport */
1825 	tqp_per_vport = hdev->num_tqps / num_vport;
1826 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1827 
1828 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1829 			     GFP_KERNEL);
1830 	if (!vport)
1831 		return -ENOMEM;
1832 
1833 	hdev->vport = vport;
1834 	hdev->num_alloc_vport = num_vport;
1835 
1836 	if (IS_ENABLED(CONFIG_PCI_IOV))
1837 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1838 
1839 	for (i = 0; i < num_vport; i++) {
1840 		vport->back = hdev;
1841 		vport->vport_id = i;
1842 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1843 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1844 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1845 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1846 		INIT_LIST_HEAD(&vport->vlan_list);
1847 		INIT_LIST_HEAD(&vport->uc_mac_list);
1848 		INIT_LIST_HEAD(&vport->mc_mac_list);
1849 		spin_lock_init(&vport->mac_list_lock);
1850 
1851 		if (i == 0)
1852 			ret = hclge_vport_setup(vport, tqp_main_vport);
1853 		else
1854 			ret = hclge_vport_setup(vport, tqp_per_vport);
1855 		if (ret) {
1856 			dev_err(&pdev->dev,
1857 				"vport setup failed for vport %d, %d\n",
1858 				i, ret);
1859 			return ret;
1860 		}
1861 
1862 		vport++;
1863 	}
1864 
1865 	return 0;
1866 }
1867 
1868 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1869 				    struct hclge_pkt_buf_alloc *buf_alloc)
1870 {
1871 /* TX buffer size is in units of 128 bytes */
1872 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1873 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1874 	struct hclge_tx_buff_alloc_cmd *req;
1875 	struct hclge_desc desc;
1876 	int ret;
1877 	u8 i;
1878 
1879 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1880 
1881 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1882 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1883 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1884 
1885 		req->tx_pkt_buff[i] =
1886 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1887 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1888 	}
1889 
1890 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1891 	if (ret)
1892 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1893 			ret);
1894 
1895 	return ret;
1896 }
1897 
1898 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1899 				 struct hclge_pkt_buf_alloc *buf_alloc)
1900 {
1901 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1902 
1903 	if (ret)
1904 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1905 
1906 	return ret;
1907 }
1908 
1909 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1910 {
1911 	unsigned int i;
1912 	u32 cnt = 0;
1913 
1914 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1915 		if (hdev->hw_tc_map & BIT(i))
1916 			cnt++;
1917 	return cnt;
1918 }
1919 
1920 /* Get the number of PFC-enabled TCs that have a private buffer */
1921 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1922 				  struct hclge_pkt_buf_alloc *buf_alloc)
1923 {
1924 	struct hclge_priv_buf *priv;
1925 	unsigned int i;
1926 	int cnt = 0;
1927 
1928 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1929 		priv = &buf_alloc->priv_buf[i];
1930 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1931 		    priv->enable)
1932 			cnt++;
1933 	}
1934 
1935 	return cnt;
1936 }
1937 
1938 /* Get the number of PFC-disabled TCs that have a private buffer */
1939 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1940 				     struct hclge_pkt_buf_alloc *buf_alloc)
1941 {
1942 	struct hclge_priv_buf *priv;
1943 	unsigned int i;
1944 	int cnt = 0;
1945 
1946 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1947 		priv = &buf_alloc->priv_buf[i];
1948 		if (hdev->hw_tc_map & BIT(i) &&
1949 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1950 		    priv->enable)
1951 			cnt++;
1952 	}
1953 
1954 	return cnt;
1955 }
1956 
1957 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1958 {
1959 	struct hclge_priv_buf *priv;
1960 	u32 rx_priv = 0;
1961 	int i;
1962 
1963 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1964 		priv = &buf_alloc->priv_buf[i];
1965 		if (priv->enable)
1966 			rx_priv += priv->buf_size;
1967 	}
1968 	return rx_priv;
1969 }
1970 
1971 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1972 {
1973 	u32 i, total_tx_size = 0;
1974 
1975 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1976 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1977 
1978 	return total_tx_size;
1979 }
1980 
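/* Check whether rx_all can hold the private buffers of all enabled TCs plus
 * a shared buffer. On success, fill in the shared buffer size, its high/low
 * waterlines and the per-TC thresholds in buf_alloc->s_buf.
 */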
1981 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1982 				struct hclge_pkt_buf_alloc *buf_alloc,
1983 				u32 rx_all)
1984 {
1985 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1986 	u32 tc_num = hclge_get_tc_num(hdev);
1987 	u32 shared_buf, aligned_mps;
1988 	u32 rx_priv;
1989 	int i;
1990 
1991 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1992 
1993 	if (hnae3_dev_dcb_supported(hdev))
1994 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1995 					hdev->dv_buf_size;
1996 	else
1997 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1998 					+ hdev->dv_buf_size;
1999 
2000 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2001 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2002 			     HCLGE_BUF_SIZE_UNIT);
2003 
2004 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2005 	if (rx_all < rx_priv + shared_std)
2006 		return false;
2007 
2008 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2009 	buf_alloc->s_buf.buf_size = shared_buf;
2010 	if (hnae3_dev_dcb_supported(hdev)) {
2011 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2012 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2013 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2014 				  HCLGE_BUF_SIZE_UNIT);
2015 	} else {
2016 		buf_alloc->s_buf.self.high = aligned_mps +
2017 						HCLGE_NON_DCB_ADDITIONAL_BUF;
2018 		buf_alloc->s_buf.self.low = aligned_mps;
2019 	}
2020 
2021 	if (hnae3_dev_dcb_supported(hdev)) {
2022 		hi_thrd = shared_buf - hdev->dv_buf_size;
2023 
2024 		if (tc_num <= NEED_RESERVE_TC_NUM)
2025 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2026 					/ BUF_MAX_PERCENT;
2027 
2028 		if (tc_num)
2029 			hi_thrd = hi_thrd / tc_num;
2030 
2031 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2032 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2033 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2034 	} else {
2035 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2036 		lo_thrd = aligned_mps;
2037 	}
2038 
2039 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2040 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2041 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2042 	}
2043 
2044 	return true;
2045 }
2046 
2047 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2048 				struct hclge_pkt_buf_alloc *buf_alloc)
2049 {
2050 	u32 i, total_size;
2051 
2052 	total_size = hdev->pkt_buf_size;
2053 
2054 	/* alloc tx buffer for all enabled tc */
2055 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2056 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2057 
2058 		if (hdev->hw_tc_map & BIT(i)) {
2059 			if (total_size < hdev->tx_buf_size)
2060 				return -ENOMEM;
2061 
2062 			priv->tx_buf_size = hdev->tx_buf_size;
2063 		} else {
2064 			priv->tx_buf_size = 0;
2065 		}
2066 
2067 		total_size -= priv->tx_buf_size;
2068 	}
2069 
2070 	return 0;
2071 }
2072 
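/* Calculate the rx private buffer of every enabled TC. When @max is true,
 * larger low/high waterlines are used; otherwise the minimum acceptable
 * waterlines are tried so the buffers are more likely to fit.
 */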
2073 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2074 				  struct hclge_pkt_buf_alloc *buf_alloc)
2075 {
2076 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2077 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2078 	unsigned int i;
2079 
2080 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2081 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2082 
2083 		priv->enable = 0;
2084 		priv->wl.low = 0;
2085 		priv->wl.high = 0;
2086 		priv->buf_size = 0;
2087 
2088 		if (!(hdev->hw_tc_map & BIT(i)))
2089 			continue;
2090 
2091 		priv->enable = 1;
2092 
2093 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2094 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2095 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2096 						HCLGE_BUF_SIZE_UNIT);
2097 		} else {
2098 			priv->wl.low = 0;
2099 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2100 					aligned_mps;
2101 		}
2102 
2103 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2104 	}
2105 
2106 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2107 }
2108 
2109 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2110 					  struct hclge_pkt_buf_alloc *buf_alloc)
2111 {
2112 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2113 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2114 	int i;
2115 
2116 	/* let the last TC be cleared first */
2117 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2118 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2119 		unsigned int mask = BIT((unsigned int)i);
2120 
2121 		if (hdev->hw_tc_map & mask &&
2122 		    !(hdev->tm_info.hw_pfc_map & mask)) {
2123 			/* Clear the no pfc TC private buffer */
2124 			priv->wl.low = 0;
2125 			priv->wl.high = 0;
2126 			priv->buf_size = 0;
2127 			priv->enable = 0;
2128 			no_pfc_priv_num--;
2129 		}
2130 
2131 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2132 		    no_pfc_priv_num == 0)
2133 			break;
2134 	}
2135 
2136 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2137 }
2138 
2139 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2140 					struct hclge_pkt_buf_alloc *buf_alloc)
2141 {
2142 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2143 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2144 	int i;
2145 
2146 	/* let the last TC be cleared first */
2147 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2148 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2149 		unsigned int mask = BIT((unsigned int)i);
2150 
2151 		if (hdev->hw_tc_map & mask &&
2152 		    hdev->tm_info.hw_pfc_map & mask) {
2153 			/* Reduce the number of pfc TC with private buffer */
2154 			priv->wl.low = 0;
2155 			priv->enable = 0;
2156 			priv->wl.high = 0;
2157 			priv->buf_size = 0;
2158 			pfc_priv_num--;
2159 		}
2160 
2161 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2162 		    pfc_priv_num == 0)
2163 			break;
2164 	}
2165 
2166 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2167 }
2168 
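/* Split the remaining packet buffer evenly among the enabled TCs as private
 * rx buffers and leave no shared buffer. Returns false when the per-TC share
 * would drop below the minimum (dv_buf_size plus compensation), so the
 * caller can fall back to the shared-buffer schemes.
 */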
2169 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2170 				      struct hclge_pkt_buf_alloc *buf_alloc)
2171 {
2172 #define COMPENSATE_BUFFER	0x3C00
2173 #define COMPENSATE_HALF_MPS_NUM	5
2174 #define PRIV_WL_GAP		0x1800
2175 
2176 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2177 	u32 tc_num = hclge_get_tc_num(hdev);
2178 	u32 half_mps = hdev->mps >> 1;
2179 	u32 min_rx_priv;
2180 	unsigned int i;
2181 
2182 	if (tc_num)
2183 		rx_priv = rx_priv / tc_num;
2184 
2185 	if (tc_num <= NEED_RESERVE_TC_NUM)
2186 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2187 
2188 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2189 			COMPENSATE_HALF_MPS_NUM * half_mps;
2190 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2191 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2192 	if (rx_priv < min_rx_priv)
2193 		return false;
2194 
2195 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2196 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2197 
2198 		priv->enable = 0;
2199 		priv->wl.low = 0;
2200 		priv->wl.high = 0;
2201 		priv->buf_size = 0;
2202 
2203 		if (!(hdev->hw_tc_map & BIT(i)))
2204 			continue;
2205 
2206 		priv->enable = 1;
2207 		priv->buf_size = rx_priv;
2208 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2209 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2210 	}
2211 
2212 	buf_alloc->s_buf.buf_size = 0;
2213 
2214 	return true;
2215 }
2216 
2217 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2218  * @hdev: pointer to struct hclge_dev
2219  * @buf_alloc: pointer to buffer calculation data
2220  * @return: 0 on success, a negative error code on failure
2221  */
2222 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2223 				struct hclge_pkt_buf_alloc *buf_alloc)
2224 {
2225 	/* When DCB is not supported, rx private buffer is not allocated. */
2226 	if (!hnae3_dev_dcb_supported(hdev)) {
2227 		u32 rx_all = hdev->pkt_buf_size;
2228 
2229 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2230 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2231 			return -ENOMEM;
2232 
2233 		return 0;
2234 	}
2235 
2236 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2237 		return 0;
2238 
2239 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2240 		return 0;
2241 
2242 	/* try to decrease the buffer size */
2243 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2244 		return 0;
2245 
2246 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2247 		return 0;
2248 
2249 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2250 		return 0;
2251 
2252 	return -ENOMEM;
2253 }
2254 
2255 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2256 				   struct hclge_pkt_buf_alloc *buf_alloc)
2257 {
2258 	struct hclge_rx_priv_buff_cmd *req;
2259 	struct hclge_desc desc;
2260 	int ret;
2261 	int i;
2262 
2263 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2264 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2265 
2266 	/* Alloc private buffer TCs */
2267 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2268 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2269 
2270 		req->buf_num[i] =
2271 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2272 		req->buf_num[i] |=
2273 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2274 	}
2275 
2276 	req->shared_buf =
2277 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2278 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2279 
2280 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2281 	if (ret)
2282 		dev_err(&hdev->pdev->dev,
2283 			"rx private buffer alloc cmd failed %d\n", ret);
2284 
2285 	return ret;
2286 }
2287 
2288 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2289 				   struct hclge_pkt_buf_alloc *buf_alloc)
2290 {
2291 	struct hclge_rx_priv_wl_buf *req;
2292 	struct hclge_priv_buf *priv;
2293 	struct hclge_desc desc[2];
2294 	int i, j;
2295 	int ret;
2296 
2297 	for (i = 0; i < 2; i++) {
2298 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2299 					   false);
2300 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2301 
2302 		/* The first descriptor sets the NEXT bit to 1 */
2303 		if (i == 0)
2304 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2305 		else
2306 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2307 
2308 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2309 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2310 
2311 			priv = &buf_alloc->priv_buf[idx];
2312 			req->tc_wl[j].high =
2313 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2314 			req->tc_wl[j].high |=
2315 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2316 			req->tc_wl[j].low =
2317 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2318 			req->tc_wl[j].low |=
2319 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2320 		}
2321 	}
2322 
2323 	/* Send 2 descriptors at one time */
2324 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2325 	if (ret)
2326 		dev_err(&hdev->pdev->dev,
2327 			"rx private waterline config cmd failed %d\n",
2328 			ret);
2329 	return ret;
2330 }
2331 
2332 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2333 				    struct hclge_pkt_buf_alloc *buf_alloc)
2334 {
2335 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2336 	struct hclge_rx_com_thrd *req;
2337 	struct hclge_desc desc[2];
2338 	struct hclge_tc_thrd *tc;
2339 	int i, j;
2340 	int ret;
2341 
2342 	for (i = 0; i < 2; i++) {
2343 		hclge_cmd_setup_basic_desc(&desc[i],
2344 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2345 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2346 
2347 		/* The first descriptor sets the NEXT bit to 1 */
2348 		if (i == 0)
2349 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2350 		else
2351 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2352 
2353 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2354 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2355 
2356 			req->com_thrd[j].high =
2357 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2358 			req->com_thrd[j].high |=
2359 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2360 			req->com_thrd[j].low =
2361 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2362 			req->com_thrd[j].low |=
2363 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2364 		}
2365 	}
2366 
2367 	/* Send 2 descriptors at one time */
2368 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2369 	if (ret)
2370 		dev_err(&hdev->pdev->dev,
2371 			"common threshold config cmd failed %d\n", ret);
2372 	return ret;
2373 }
2374 
2375 static int hclge_common_wl_config(struct hclge_dev *hdev,
2376 				  struct hclge_pkt_buf_alloc *buf_alloc)
2377 {
2378 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2379 	struct hclge_rx_com_wl *req;
2380 	struct hclge_desc desc;
2381 	int ret;
2382 
2383 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2384 
2385 	req = (struct hclge_rx_com_wl *)desc.data;
2386 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2387 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2388 
2389 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2390 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2391 
2392 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2393 	if (ret)
2394 		dev_err(&hdev->pdev->dev,
2395 			"common waterline config cmd failed %d\n", ret);
2396 
2397 	return ret;
2398 }
2399 
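/* Top-level packet buffer setup: calculate and commit the tx buffers, then
 * the rx private buffers, and finally the waterlines and thresholds (the
 * per-TC waterline and common threshold commands are DCB-only).
 */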
2400 int hclge_buffer_alloc(struct hclge_dev *hdev)
2401 {
2402 	struct hclge_pkt_buf_alloc *pkt_buf;
2403 	int ret;
2404 
2405 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2406 	if (!pkt_buf)
2407 		return -ENOMEM;
2408 
2409 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2410 	if (ret) {
2411 		dev_err(&hdev->pdev->dev,
2412 			"could not calc tx buffer size for all TCs %d\n", ret);
2413 		goto out;
2414 	}
2415 
2416 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2417 	if (ret) {
2418 		dev_err(&hdev->pdev->dev,
2419 			"could not alloc tx buffers %d\n", ret);
2420 		goto out;
2421 	}
2422 
2423 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2424 	if (ret) {
2425 		dev_err(&hdev->pdev->dev,
2426 			"could not calc rx priv buffer size for all TCs %d\n",
2427 			ret);
2428 		goto out;
2429 	}
2430 
2431 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2432 	if (ret) {
2433 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2434 			ret);
2435 		goto out;
2436 	}
2437 
2438 	if (hnae3_dev_dcb_supported(hdev)) {
2439 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2440 		if (ret) {
2441 			dev_err(&hdev->pdev->dev,
2442 				"could not configure rx private waterline %d\n",
2443 				ret);
2444 			goto out;
2445 		}
2446 
2447 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2448 		if (ret) {
2449 			dev_err(&hdev->pdev->dev,
2450 				"could not configure common threshold %d\n",
2451 				ret);
2452 			goto out;
2453 		}
2454 	}
2455 
2456 	ret = hclge_common_wl_config(hdev, pkt_buf);
2457 	if (ret)
2458 		dev_err(&hdev->pdev->dev,
2459 			"could not configure common waterline %d\n", ret);
2460 
2461 out:
2462 	kfree(pkt_buf);
2463 	return ret;
2464 }
2465 
2466 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2467 {
2468 	struct hnae3_handle *roce = &vport->roce;
2469 	struct hnae3_handle *nic = &vport->nic;
2470 	struct hclge_dev *hdev = vport->back;
2471 
2472 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2473 
2474 	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2475 		return -EINVAL;
2476 
2477 	roce->rinfo.base_vector = hdev->roce_base_vector;
2478 
2479 	roce->rinfo.netdev = nic->kinfo.netdev;
2480 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2481 	roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2482 
2483 	roce->pdev = nic->pdev;
2484 	roce->ae_algo = nic->ae_algo;
2485 	roce->numa_node_mask = nic->numa_node_mask;
2486 
2487 	return 0;
2488 }
2489 
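/* Allocate between HNAE3_MIN_VECTOR_NUM and hdev->num_msi MSI/MSI-X vectors
 * and set up the vector_status/vector_irq tracking arrays. The RoCE vectors
 * start right after the NIC vectors.
 */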
2490 static int hclge_init_msi(struct hclge_dev *hdev)
2491 {
2492 	struct pci_dev *pdev = hdev->pdev;
2493 	int vectors;
2494 	int i;
2495 
2496 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2497 					hdev->num_msi,
2498 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2499 	if (vectors < 0) {
2500 		dev_err(&pdev->dev,
2501 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2502 			vectors);
2503 		return vectors;
2504 	}
2505 	if (vectors < hdev->num_msi)
2506 		dev_warn(&hdev->pdev->dev,
2507 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2508 			 hdev->num_msi, vectors);
2509 
2510 	hdev->num_msi = vectors;
2511 	hdev->num_msi_left = vectors;
2512 
2513 	hdev->base_msi_vector = pdev->irq;
2514 	hdev->roce_base_vector = hdev->base_msi_vector +
2515 				hdev->num_nic_msi;
2516 
2517 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2518 					   sizeof(u16), GFP_KERNEL);
2519 	if (!hdev->vector_status) {
2520 		pci_free_irq_vectors(pdev);
2521 		return -ENOMEM;
2522 	}
2523 
2524 	for (i = 0; i < hdev->num_msi; i++)
2525 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2526 
2527 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2528 					sizeof(int), GFP_KERNEL);
2529 	if (!hdev->vector_irq) {
2530 		pci_free_irq_vectors(pdev);
2531 		return -ENOMEM;
2532 	}
2533 
2534 	return 0;
2535 }
2536 
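/* Only 10M and 100M support half duplex; any other speed is forced to full
 * duplex.
 */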
2537 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2538 {
2539 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2540 		duplex = HCLGE_MAC_FULL;
2541 
2542 	return duplex;
2543 }
2544 
2545 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2546 				      u8 duplex)
2547 {
2548 	struct hclge_config_mac_speed_dup_cmd *req;
2549 	struct hclge_desc desc;
2550 	int ret;
2551 
2552 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2553 
2554 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2555 
2556 	if (duplex)
2557 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2558 
2559 	switch (speed) {
2560 	case HCLGE_MAC_SPEED_10M:
2561 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2562 				HCLGE_CFG_SPEED_S, 6);
2563 		break;
2564 	case HCLGE_MAC_SPEED_100M:
2565 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2566 				HCLGE_CFG_SPEED_S, 7);
2567 		break;
2568 	case HCLGE_MAC_SPEED_1G:
2569 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2570 				HCLGE_CFG_SPEED_S, 0);
2571 		break;
2572 	case HCLGE_MAC_SPEED_10G:
2573 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2574 				HCLGE_CFG_SPEED_S, 1);
2575 		break;
2576 	case HCLGE_MAC_SPEED_25G:
2577 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2578 				HCLGE_CFG_SPEED_S, 2);
2579 		break;
2580 	case HCLGE_MAC_SPEED_40G:
2581 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2582 				HCLGE_CFG_SPEED_S, 3);
2583 		break;
2584 	case HCLGE_MAC_SPEED_50G:
2585 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2586 				HCLGE_CFG_SPEED_S, 4);
2587 		break;
2588 	case HCLGE_MAC_SPEED_100G:
2589 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2590 				HCLGE_CFG_SPEED_S, 5);
2591 		break;
2592 	case HCLGE_MAC_SPEED_200G:
2593 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2594 				HCLGE_CFG_SPEED_S, 8);
2595 		break;
2596 	default:
2597 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2598 		return -EINVAL;
2599 	}
2600 
2601 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2602 		      1);
2603 
2604 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2605 	if (ret) {
2606 		dev_err(&hdev->pdev->dev,
2607 			"mac speed/duplex config cmd failed %d.\n", ret);
2608 		return ret;
2609 	}
2610 
2611 	return 0;
2612 }
2613 
2614 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2615 {
2616 	struct hclge_mac *mac = &hdev->hw.mac;
2617 	int ret;
2618 
2619 	duplex = hclge_check_speed_dup(duplex, speed);
2620 	if (!mac->support_autoneg && mac->speed == speed &&
2621 	    mac->duplex == duplex)
2622 		return 0;
2623 
2624 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2625 	if (ret)
2626 		return ret;
2627 
2628 	hdev->hw.mac.speed = speed;
2629 	hdev->hw.mac.duplex = duplex;
2630 
2631 	return 0;
2632 }
2633 
2634 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2635 				     u8 duplex)
2636 {
2637 	struct hclge_vport *vport = hclge_get_vport(handle);
2638 	struct hclge_dev *hdev = vport->back;
2639 
2640 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2641 }
2642 
2643 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2644 {
2645 	struct hclge_config_auto_neg_cmd *req;
2646 	struct hclge_desc desc;
2647 	u32 flag = 0;
2648 	int ret;
2649 
2650 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2651 
2652 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2653 	if (enable)
2654 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2655 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2656 
2657 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2658 	if (ret)
2659 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2660 			ret);
2661 
2662 	return ret;
2663 }
2664 
2665 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2666 {
2667 	struct hclge_vport *vport = hclge_get_vport(handle);
2668 	struct hclge_dev *hdev = vport->back;
2669 
2670 	if (!hdev->hw.mac.support_autoneg) {
2671 		if (enable) {
2672 			dev_err(&hdev->pdev->dev,
2673 				"autoneg is not supported by current port\n");
2674 			return -EOPNOTSUPP;
2675 		} else {
2676 			return 0;
2677 		}
2678 	}
2679 
2680 	return hclge_set_autoneg_en(hdev, enable);
2681 }
2682 
2683 static int hclge_get_autoneg(struct hnae3_handle *handle)
2684 {
2685 	struct hclge_vport *vport = hclge_get_vport(handle);
2686 	struct hclge_dev *hdev = vport->back;
2687 	struct phy_device *phydev = hdev->hw.mac.phydev;
2688 
2689 	if (phydev)
2690 		return phydev->autoneg;
2691 
2692 	return hdev->hw.mac.autoneg;
2693 }
2694 
2695 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2696 {
2697 	struct hclge_vport *vport = hclge_get_vport(handle);
2698 	struct hclge_dev *hdev = vport->back;
2699 	int ret;
2700 
2701 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2702 
2703 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2704 	if (ret)
2705 		return ret;
2706 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2707 }
2708 
2709 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2710 {
2711 	struct hclge_vport *vport = hclge_get_vport(handle);
2712 	struct hclge_dev *hdev = vport->back;
2713 
2714 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2715 		return hclge_set_autoneg_en(hdev, !halt);
2716 
2717 	return 0;
2718 }
2719 
2720 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2721 {
2722 	struct hclge_config_fec_cmd *req;
2723 	struct hclge_desc desc;
2724 	int ret;
2725 
2726 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2727 
2728 	req = (struct hclge_config_fec_cmd *)desc.data;
2729 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2730 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2731 	if (fec_mode & BIT(HNAE3_FEC_RS))
2732 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2733 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2734 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2735 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2736 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2737 
2738 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2739 	if (ret)
2740 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2741 
2742 	return ret;
2743 }
2744 
2745 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2746 {
2747 	struct hclge_vport *vport = hclge_get_vport(handle);
2748 	struct hclge_dev *hdev = vport->back;
2749 	struct hclge_mac *mac = &hdev->hw.mac;
2750 	int ret;
2751 
2752 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2753 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2754 		return -EINVAL;
2755 	}
2756 
2757 	ret = hclge_set_fec_hw(hdev, fec_mode);
2758 	if (ret)
2759 		return ret;
2760 
2761 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2762 	return 0;
2763 }
2764 
2765 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2766 			  u8 *fec_mode)
2767 {
2768 	struct hclge_vport *vport = hclge_get_vport(handle);
2769 	struct hclge_dev *hdev = vport->back;
2770 	struct hclge_mac *mac = &hdev->hw.mac;
2771 
2772 	if (fec_ability)
2773 		*fec_ability = mac->fec_ability;
2774 	if (fec_mode)
2775 		*fec_mode = mac->fec_mode;
2776 }
2777 
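/* Bring the MAC to a known state: program speed/duplex, autoneg and FEC
 * (when applicable), set the MTU and the default loopback state, then
 * reallocate the packet buffers.
 */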
2778 static int hclge_mac_init(struct hclge_dev *hdev)
2779 {
2780 	struct hclge_mac *mac = &hdev->hw.mac;
2781 	int ret;
2782 
2783 	hdev->support_sfp_query = true;
2784 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2785 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2786 					 hdev->hw.mac.duplex);
2787 	if (ret)
2788 		return ret;
2789 
2790 	if (hdev->hw.mac.support_autoneg) {
2791 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2792 		if (ret)
2793 			return ret;
2794 	}
2795 
2796 	mac->link = 0;
2797 
2798 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2799 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2800 		if (ret)
2801 			return ret;
2802 	}
2803 
2804 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2805 	if (ret) {
2806 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2807 		return ret;
2808 	}
2809 
2810 	ret = hclge_set_default_loopback(hdev);
2811 	if (ret)
2812 		return ret;
2813 
2814 	ret = hclge_buffer_alloc(hdev);
2815 	if (ret)
2816 		dev_err(&hdev->pdev->dev,
2817 			"allocate buffer fail, ret=%d\n", ret);
2818 
2819 	return ret;
2820 }
2821 
2822 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2823 {
2824 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2825 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2826 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2827 				    hclge_wq, &hdev->service_task, 0);
2828 }
2829 
2830 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2831 {
2832 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2833 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2834 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2835 				    hclge_wq, &hdev->service_task, 0);
2836 }
2837 
2838 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2839 {
2840 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2841 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2842 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2843 				    hclge_wq, &hdev->service_task,
2844 				    delay_time);
2845 }
2846 
2847 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2848 {
2849 	struct hclge_link_status_cmd *req;
2850 	struct hclge_desc desc;
2851 	int ret;
2852 
2853 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2854 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2855 	if (ret) {
2856 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2857 			ret);
2858 		return ret;
2859 	}
2860 
2861 	req = (struct hclge_link_status_cmd *)desc.data;
2862 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2863 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2864 
2865 	return 0;
2866 }
2867 
2868 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2869 {
2870 	struct phy_device *phydev = hdev->hw.mac.phydev;
2871 
2872 	*link_status = HCLGE_LINK_STATUS_DOWN;
2873 
2874 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2875 		return 0;
2876 
2877 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2878 		return 0;
2879 
2880 	return hclge_get_mac_link_status(hdev, link_status);
2881 }
2882 
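/* Push the current link status to every alive VF whose link state is set
 * to "auto".
 */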
2883 static void hclge_push_link_status(struct hclge_dev *hdev)
2884 {
2885 	struct hclge_vport *vport;
2886 	int ret;
2887 	u16 i;
2888 
2889 	for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2890 		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2891 
2892 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2893 		    vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2894 			continue;
2895 
2896 		ret = hclge_push_vf_link_status(vport);
2897 		if (ret) {
2898 			dev_err(&hdev->pdev->dev,
2899 				"failed to push link status to vf%u, ret = %d\n",
2900 				i, ret);
2901 		}
2902 	}
2903 }
2904 
2905 static void hclge_update_link_status(struct hclge_dev *hdev)
2906 {
2907 	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2908 	struct hnae3_handle *handle = &hdev->vport[0].nic;
2909 	struct hnae3_client *rclient = hdev->roce_client;
2910 	struct hnae3_client *client = hdev->nic_client;
2911 	int state;
2912 	int ret;
2913 
2914 	if (!client)
2915 		return;
2916 
2917 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2918 		return;
2919 
2920 	ret = hclge_get_mac_phy_link(hdev, &state);
2921 	if (ret) {
2922 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2923 		return;
2924 	}
2925 
2926 	if (state != hdev->hw.mac.link) {
2927 		client->ops->link_status_change(handle, state);
2928 		hclge_config_mac_tnl_int(hdev, state);
2929 		if (rclient && rclient->ops->link_status_change)
2930 			rclient->ops->link_status_change(rhandle, state);
2931 
2932 		hdev->hw.mac.link = state;
2933 		hclge_push_link_status(hdev);
2934 	}
2935 
2936 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2937 }
2938 
2939 static void hclge_update_port_capability(struct hclge_dev *hdev,
2940 					 struct hclge_mac *mac)
2941 {
2942 	if (hnae3_dev_fec_supported(hdev))
2943 		/* update fec ability by speed */
2944 		hclge_convert_setting_fec(mac);
2945 
2946 	/* firmware cannot identify the backplane type, so the media type
2947 	 * read from the configuration helps to deal with it
2948 	 */
2949 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2950 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2951 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2952 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2953 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2954 
2955 	if (mac->support_autoneg) {
2956 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2957 		linkmode_copy(mac->advertising, mac->supported);
2958 	} else {
2959 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2960 				   mac->supported);
2961 		linkmode_zero(mac->advertising);
2962 	}
2963 }
2964 
2965 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2966 {
2967 	struct hclge_sfp_info_cmd *resp;
2968 	struct hclge_desc desc;
2969 	int ret;
2970 
2971 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2972 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2973 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2974 	if (ret == -EOPNOTSUPP) {
2975 		dev_warn(&hdev->pdev->dev,
2976 			 "IMP do not support get SFP speed %d\n", ret);
2977 		return ret;
2978 	} else if (ret) {
2979 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2980 		return ret;
2981 	}
2982 
2983 	*speed = le32_to_cpu(resp->speed);
2984 
2985 	return 0;
2986 }
2987 
2988 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2989 {
2990 	struct hclge_sfp_info_cmd *resp;
2991 	struct hclge_desc desc;
2992 	int ret;
2993 
2994 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2995 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2996 
2997 	resp->query_type = QUERY_ACTIVE_SPEED;
2998 
2999 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3000 	if (ret == -EOPNOTSUPP) {
3001 		dev_warn(&hdev->pdev->dev,
3002 			 "IMP does not support get SFP info %d\n", ret);
3003 		return ret;
3004 	} else if (ret) {
3005 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3006 		return ret;
3007 	}
3008 
3009 	/* In some cases, the MAC speed queried from the IMP may be 0; it
3010 	 * should not be written to mac->speed.
3011 	 */
3012 	if (!le32_to_cpu(resp->speed))
3013 		return 0;
3014 
3015 	mac->speed = le32_to_cpu(resp->speed);
3016 	/* if resp->speed_ability is 0, the firmware is an old version and
3017 	 * these params should not be updated
3018 	 */
3019 	if (resp->speed_ability) {
3020 		mac->module_type = le32_to_cpu(resp->module_type);
3021 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
3022 		mac->autoneg = resp->autoneg;
3023 		mac->support_autoneg = resp->autoneg_ability;
3024 		mac->speed_type = QUERY_ACTIVE_SPEED;
3025 		if (!resp->active_fec)
3026 			mac->fec_mode = 0;
3027 		else
3028 			mac->fec_mode = BIT(resp->active_fec);
3029 	} else {
3030 		mac->speed_type = QUERY_SFP_SPEED;
3031 	}
3032 
3033 	return 0;
3034 }
3035 
3036 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3037 					struct ethtool_link_ksettings *cmd)
3038 {
3039 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3040 	struct hclge_vport *vport = hclge_get_vport(handle);
3041 	struct hclge_phy_link_ksetting_0_cmd *req0;
3042 	struct hclge_phy_link_ksetting_1_cmd *req1;
3043 	u32 supported, advertising, lp_advertising;
3044 	struct hclge_dev *hdev = vport->back;
3045 	int ret;
3046 
3047 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3048 				   true);
3049 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3050 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3051 				   true);
3052 
3053 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3054 	if (ret) {
3055 		dev_err(&hdev->pdev->dev,
3056 			"failed to get phy link ksetting, ret = %d.\n", ret);
3057 		return ret;
3058 	}
3059 
3060 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3061 	cmd->base.autoneg = req0->autoneg;
3062 	cmd->base.speed = le32_to_cpu(req0->speed);
3063 	cmd->base.duplex = req0->duplex;
3064 	cmd->base.port = req0->port;
3065 	cmd->base.transceiver = req0->transceiver;
3066 	cmd->base.phy_address = req0->phy_address;
3067 	cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3068 	cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3069 	supported = le32_to_cpu(req0->supported);
3070 	advertising = le32_to_cpu(req0->advertising);
3071 	lp_advertising = le32_to_cpu(req0->lp_advertising);
3072 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3073 						supported);
3074 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3075 						advertising);
3076 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3077 						lp_advertising);
3078 
3079 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3080 	cmd->base.master_slave_cfg = req1->master_slave_cfg;
3081 	cmd->base.master_slave_state = req1->master_slave_state;
3082 
3083 	return 0;
3084 }
3085 
3086 static int
3087 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3088 			     const struct ethtool_link_ksettings *cmd)
3089 {
3090 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3091 	struct hclge_vport *vport = hclge_get_vport(handle);
3092 	struct hclge_phy_link_ksetting_0_cmd *req0;
3093 	struct hclge_phy_link_ksetting_1_cmd *req1;
3094 	struct hclge_dev *hdev = vport->back;
3095 	u32 advertising;
3096 	int ret;
3097 
3098 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
3099 	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3100 	     (cmd->base.duplex != DUPLEX_HALF &&
3101 	      cmd->base.duplex != DUPLEX_FULL)))
3102 		return -EINVAL;
3103 
3104 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3105 				   false);
3106 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3107 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3108 				   false);
3109 
3110 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3111 	req0->autoneg = cmd->base.autoneg;
3112 	req0->speed = cpu_to_le32(cmd->base.speed);
3113 	req0->duplex = cmd->base.duplex;
3114 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
3115 						cmd->link_modes.advertising);
3116 	req0->advertising = cpu_to_le32(advertising);
3117 	req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3118 
3119 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3120 	req1->master_slave_cfg = cmd->base.master_slave_cfg;
3121 
3122 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3123 	if (ret) {
3124 		dev_err(&hdev->pdev->dev,
3125 			"failed to set phy link ksettings, ret = %d.\n", ret);
3126 		return ret;
3127 	}
3128 
3129 	hdev->hw.mac.autoneg = cmd->base.autoneg;
3130 	hdev->hw.mac.speed = cmd->base.speed;
3131 	hdev->hw.mac.duplex = cmd->base.duplex;
3132 	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3133 
3134 	return 0;
3135 }
3136 
3137 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3138 {
3139 	struct ethtool_link_ksettings cmd;
3140 	int ret;
3141 
3142 	if (!hnae3_dev_phy_imp_supported(hdev))
3143 		return 0;
3144 
3145 	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3146 	if (ret)
3147 		return ret;
3148 
3149 	hdev->hw.mac.autoneg = cmd.base.autoneg;
3150 	hdev->hw.mac.speed = cmd.base.speed;
3151 	hdev->hw.mac.duplex = cmd.base.duplex;
3152 
3153 	return 0;
3154 }
3155 
3156 static int hclge_tp_port_init(struct hclge_dev *hdev)
3157 {
3158 	struct ethtool_link_ksettings cmd;
3159 
3160 	if (!hnae3_dev_phy_imp_supported(hdev))
3161 		return 0;
3162 
3163 	cmd.base.autoneg = hdev->hw.mac.autoneg;
3164 	cmd.base.speed = hdev->hw.mac.speed;
3165 	cmd.base.duplex = hdev->hw.mac.duplex;
3166 	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3167 
3168 	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3169 }
3170 
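/* Refresh the cached port settings: copper ports are handled through the
 * PHY link ksettings command (when the PHY is managed by the IMP), other
 * ports query the SFP info/speed from the firmware. A -EOPNOTSUPP reply
 * disables further SFP queries.
 */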
3171 static int hclge_update_port_info(struct hclge_dev *hdev)
3172 {
3173 	struct hclge_mac *mac = &hdev->hw.mac;
3174 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
3175 	int ret;
3176 
3177 	/* get the port info from SFP cmd if not copper port */
3178 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3179 		return hclge_update_tp_port_info(hdev);
3180 
3181 	/* if IMP does not support get SFP/qSFP info, return directly */
3182 	if (!hdev->support_sfp_query)
3183 		return 0;
3184 
3185 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3186 		ret = hclge_get_sfp_info(hdev, mac);
3187 	else
3188 		ret = hclge_get_sfp_speed(hdev, &speed);
3189 
3190 	if (ret == -EOPNOTSUPP) {
3191 		hdev->support_sfp_query = false;
3192 		return ret;
3193 	} else if (ret) {
3194 		return ret;
3195 	}
3196 
3197 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3198 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3199 			hclge_update_port_capability(hdev, mac);
3200 			return 0;
3201 		}
3202 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3203 					       HCLGE_MAC_FULL);
3204 	} else {
3205 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3206 			return 0; /* do nothing if no SFP */
3207 
3208 		/* must config full duplex for SFP */
3209 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3210 	}
3211 }
3212 
3213 static int hclge_get_status(struct hnae3_handle *handle)
3214 {
3215 	struct hclge_vport *vport = hclge_get_vport(handle);
3216 	struct hclge_dev *hdev = vport->back;
3217 
3218 	hclge_update_link_status(hdev);
3219 
3220 	return hdev->hw.mac.link;
3221 }
3222 
3223 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3224 {
3225 	if (!pci_num_vf(hdev->pdev)) {
3226 		dev_err(&hdev->pdev->dev,
3227 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
3228 		return NULL;
3229 	}
3230 
3231 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3232 		dev_err(&hdev->pdev->dev,
3233 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3234 			vf, pci_num_vf(hdev->pdev));
3235 		return NULL;
3236 	}
3237 
3238 	/* VFs start from 1 in the vport array */
3239 	vf += HCLGE_VF_VPORT_START_NUM;
3240 	return &hdev->vport[vf];
3241 }
3242 
3243 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3244 			       struct ifla_vf_info *ivf)
3245 {
3246 	struct hclge_vport *vport = hclge_get_vport(handle);
3247 	struct hclge_dev *hdev = vport->back;
3248 
3249 	vport = hclge_get_vf_vport(hdev, vf);
3250 	if (!vport)
3251 		return -EINVAL;
3252 
3253 	ivf->vf = vf;
3254 	ivf->linkstate = vport->vf_info.link_state;
3255 	ivf->spoofchk = vport->vf_info.spoofchk;
3256 	ivf->trusted = vport->vf_info.trusted;
3257 	ivf->min_tx_rate = 0;
3258 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3259 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3260 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3261 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3262 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3263 
3264 	return 0;
3265 }
3266 
3267 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3268 				   int link_state)
3269 {
3270 	struct hclge_vport *vport = hclge_get_vport(handle);
3271 	struct hclge_dev *hdev = vport->back;
3272 	int link_state_old;
3273 	int ret;
3274 
3275 	vport = hclge_get_vf_vport(hdev, vf);
3276 	if (!vport)
3277 		return -EINVAL;
3278 
3279 	link_state_old = vport->vf_info.link_state;
3280 	vport->vf_info.link_state = link_state;
3281 
3282 	ret = hclge_push_vf_link_status(vport);
3283 	if (ret) {
3284 		vport->vf_info.link_state = link_state_old;
3285 		dev_err(&hdev->pdev->dev,
3286 			"failed to push vf%d link status, ret = %d\n", vf, ret);
3287 	}
3288 
3289 	return ret;
3290 }
3291 
3292 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3293 {
3294 	u32 cmdq_src_reg, msix_src_reg;
3295 
3296 	/* fetch the events from their corresponding regs */
3297 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3298 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3299 
3300 	/* Assumption: if reset and mailbox events happen to be reported
3301 	 * together, we only process the reset event in this pass and defer
3302 	 * the processing of the mailbox events. Since the RX CMDQ event has
3303 	 * not been cleared this time, we will receive another interrupt from
3304 	 * the hardware just for the mailbox.
3305 	 *
3306 	 * check for vector0 reset event sources
3307 	 */
3308 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3309 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3310 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3311 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3312 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3313 		hdev->rst_stats.imp_rst_cnt++;
3314 		return HCLGE_VECTOR0_EVENT_RST;
3315 	}
3316 
3317 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3318 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3319 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3320 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3321 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3322 		hdev->rst_stats.global_rst_cnt++;
3323 		return HCLGE_VECTOR0_EVENT_RST;
3324 	}
3325 
3326 	/* check for vector0 msix event source */
3327 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3328 		*clearval = msix_src_reg;
3329 		return HCLGE_VECTOR0_EVENT_ERR;
3330 	}
3331 
3332 	/* check for vector0 mailbox(=CMDQ RX) event source */
3333 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3334 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3335 		*clearval = cmdq_src_reg;
3336 		return HCLGE_VECTOR0_EVENT_MBX;
3337 	}
3338 
3339 	/* print other vector0 event source */
3340 	dev_info(&hdev->pdev->dev,
3341 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3342 		 cmdq_src_reg, msix_src_reg);
3343 	*clearval = msix_src_reg;
3344 
3345 	return HCLGE_VECTOR0_EVENT_OTHER;
3346 }
3347 
3348 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3349 				    u32 regclr)
3350 {
3351 	switch (event_type) {
3352 	case HCLGE_VECTOR0_EVENT_RST:
3353 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3354 		break;
3355 	case HCLGE_VECTOR0_EVENT_MBX:
3356 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3357 		break;
3358 	default:
3359 		break;
3360 	}
3361 }
3362 
3363 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3364 {
3365 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3366 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3367 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3368 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3369 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3370 }
3371 
3372 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3373 {
3374 	writel(enable ? 1 : 0, vector->addr);
3375 }
3376 
3377 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3378 {
3379 	struct hclge_dev *hdev = data;
3380 	u32 clearval = 0;
3381 	u32 event_cause;
3382 
3383 	hclge_enable_vector(&hdev->misc_vector, false);
3384 	event_cause = hclge_check_event_cause(hdev, &clearval);
3385 
3386 	/* vector 0 interrupt is shared with reset and mailbox source events. */
3387 	switch (event_cause) {
3388 	case HCLGE_VECTOR0_EVENT_ERR:
3389 		/* we do not know what type of reset is required now. This could
3390 		 * only be decided after we fetch the type of errors which
3391 		 * caused this event. Therefore, we do the following for now:
3392 		 * 1. Assert the HNAE3_UNKNOWN_RESET type of reset. This means
3393 		 *    the type of reset to be used is deferred.
3394 		 * 2. Schedule the reset service task.
3395 		 * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
3396 		 *    will fetch the correct type of reset. This is done by
3397 		 *    first decoding the types of errors.
3398 		 */
3399 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3400 		fallthrough;
3401 	case HCLGE_VECTOR0_EVENT_RST:
3402 		hclge_reset_task_schedule(hdev);
3403 		break;
3404 	case HCLGE_VECTOR0_EVENT_MBX:
3405 		/* If we are here then,
3406 		 * 1. Either we are not handling any mbx task and none is
3407 		 *    scheduled as well
3408 		 *                        OR
3409 		 * 2. We could be handling an mbx task but nothing more is
3410 		 *    scheduled.
3411 		 * In both cases, we should schedule the mbx task as there are
3412 		 * more mbx messages reported by this interrupt.
3413 		 */
3414 		hclge_mbx_task_schedule(hdev);
3415 		break;
3416 	default:
3417 		dev_warn(&hdev->pdev->dev,
3418 			 "received unknown or unhandled event of vector0\n");
3419 		break;
3420 	}
3421 
3422 	hclge_clear_event_cause(hdev, event_cause, clearval);
3423 
3424 	/* Enable the interrupt if it is not caused by reset. When clearval
3425 	 * equals 0, it means the interrupt status may have been cleared by
3426 	 * hardware before the driver reads the status register. In this
3427 	 * case, the vector0 interrupt should also be enabled.
3428 	 */
3429 	if (!clearval ||
3430 	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3431 		hclge_enable_vector(&hdev->misc_vector, true);
3432 	}
3433 
3434 	return IRQ_HANDLED;
3435 }
3436 
3437 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3438 {
3439 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3440 		dev_warn(&hdev->pdev->dev,
3441 			 "vector(vector_id %d) has been freed.\n", vector_id);
3442 		return;
3443 	}
3444 
3445 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3446 	hdev->num_msi_left += 1;
3447 	hdev->num_msi_used -= 1;
3448 }
3449 
3450 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3451 {
3452 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3453 
3454 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3455 
3456 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3457 	hdev->vector_status[0] = 0;
3458 
3459 	hdev->num_msi_left -= 1;
3460 	hdev->num_msi_used += 1;
3461 }
3462 
3463 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3464 				      const cpumask_t *mask)
3465 {
3466 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3467 					      affinity_notify);
3468 
3469 	cpumask_copy(&hdev->affinity_mask, mask);
3470 }
3471 
3472 static void hclge_irq_affinity_release(struct kref *ref)
3473 {
3474 }
3475 
3476 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3477 {
3478 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3479 			      &hdev->affinity_mask);
3480 
3481 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3482 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3483 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3484 				  &hdev->affinity_notify);
3485 }
3486 
3487 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3488 {
3489 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3490 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3491 }
3492 
3493 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3494 {
3495 	int ret;
3496 
3497 	hclge_get_misc_vector(hdev);
3498 
3499 	/* this would be explicitly freed in the end */
3500 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3501 		 HCLGE_NAME, pci_name(hdev->pdev));
3502 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3503 			  0, hdev->misc_vector.name, hdev);
3504 	if (ret) {
3505 		hclge_free_vector(hdev, 0);
3506 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3507 			hdev->misc_vector.vector_irq);
3508 	}
3509 
3510 	return ret;
3511 }
3512 
3513 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3514 {
3515 	free_irq(hdev->misc_vector.vector_irq, hdev);
3516 	hclge_free_vector(hdev, 0);
3517 }
3518 
3519 int hclge_notify_client(struct hclge_dev *hdev,
3520 			enum hnae3_reset_notify_type type)
3521 {
3522 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3523 	struct hnae3_client *client = hdev->nic_client;
3524 	int ret;
3525 
3526 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3527 		return 0;
3528 
3529 	if (!client->ops->reset_notify)
3530 		return -EOPNOTSUPP;
3531 
3532 	ret = client->ops->reset_notify(handle, type);
3533 	if (ret)
3534 		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3535 			type, ret);
3536 
3537 	return ret;
3538 }
3539 
3540 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3541 				    enum hnae3_reset_notify_type type)
3542 {
3543 	struct hnae3_handle *handle = &hdev->vport[0].roce;
3544 	struct hnae3_client *client = hdev->roce_client;
3545 	int ret;
3546 
3547 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3548 		return 0;
3549 
3550 	if (!client->ops->reset_notify)
3551 		return -EOPNOTSUPP;
3552 
3553 	ret = client->ops->reset_notify(handle, type);
3554 	if (ret)
3555 		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)\n",
3556 			type, ret);
3557 
3558 	return ret;
3559 }
3560 
3561 static int hclge_reset_wait(struct hclge_dev *hdev)
3562 {
3563 #define HCLGE_RESET_WAIT_MS	100
3564 #define HCLGE_RESET_WAIT_CNT	350
3565 
3566 	u32 val, reg, reg_bit;
3567 	u32 cnt = 0;
3568 
3569 	switch (hdev->reset_type) {
3570 	case HNAE3_IMP_RESET:
3571 		reg = HCLGE_GLOBAL_RESET_REG;
3572 		reg_bit = HCLGE_IMP_RESET_BIT;
3573 		break;
3574 	case HNAE3_GLOBAL_RESET:
3575 		reg = HCLGE_GLOBAL_RESET_REG;
3576 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3577 		break;
3578 	case HNAE3_FUNC_RESET:
3579 		reg = HCLGE_FUN_RST_ING;
3580 		reg_bit = HCLGE_FUN_RST_ING_B;
3581 		break;
3582 	default:
3583 		dev_err(&hdev->pdev->dev,
3584 			"Wait for unsupported reset type: %d\n",
3585 			hdev->reset_type);
3586 		return -EINVAL;
3587 	}
3588 
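	/* poll the selected reset status bit until hardware clears it or
	 * the wait count expires
	 */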
3589 	val = hclge_read_dev(&hdev->hw, reg);
3590 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3591 		msleep(HCLGE_RESET_WAIT_MS);
3592 		val = hclge_read_dev(&hdev->hw, reg);
3593 		cnt++;
3594 	}
3595 
3596 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3597 		dev_warn(&hdev->pdev->dev,
3598 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3599 		return -EBUSY;
3600 	}
3601 
3602 	return 0;
3603 }
3604 
3605 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3606 {
3607 	struct hclge_vf_rst_cmd *req;
3608 	struct hclge_desc desc;
3609 
3610 	req = (struct hclge_vf_rst_cmd *)desc.data;
3611 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3612 	req->dest_vfid = func_id;
3613 
3614 	if (reset)
3615 		req->vf_rst = 0x1;
3616 
3617 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3618 }
3619 
3620 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3621 {
3622 	int i;
3623 
3624 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3625 		struct hclge_vport *vport = &hdev->vport[i];
3626 		int ret;
3627 
3628 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3629 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3630 		if (ret) {
3631 			dev_err(&hdev->pdev->dev,
3632 				"set vf(%u) rst failed %d!\n",
3633 				vport->vport_id, ret);
3634 			return ret;
3635 		}
3636 
3637 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3638 			continue;
3639 
3640 		/* Inform VF to process the reset.
3641 		 * hclge_inform_reset_assert_to_vf may fail if VF
3642 		 * driver is not loaded.
3643 		 */
3644 		ret = hclge_inform_reset_assert_to_vf(vport);
3645 		if (ret)
3646 			dev_warn(&hdev->pdev->dev,
3647 				 "inform reset to vf(%u) failed %d!\n",
3648 				 vport->vport_id, ret);
3649 	}
3650 
3651 	return 0;
3652 }
3653 
3654 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3655 {
3656 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3657 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3658 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3659 		return;
3660 
3661 	hclge_mbx_handler(hdev);
3662 
3663 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3664 }
3665 
3666 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3667 {
3668 	struct hclge_pf_rst_sync_cmd *req;
3669 	struct hclge_desc desc;
3670 	int cnt = 0;
3671 	int ret;
3672 
3673 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3674 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3675 
3676 	do {
3677 		/* the VF needs to bring down its netdev by mbx during PF or FLR reset */
3678 		hclge_mailbox_service_task(hdev);
3679 
3680 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3681 		/* for compatibility with old firmware, wait
3682 		 * 100 ms for the VF to stop IO
3683 		 */
3684 		if (ret == -EOPNOTSUPP) {
3685 			msleep(HCLGE_RESET_SYNC_TIME);
3686 			return;
3687 		} else if (ret) {
3688 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3689 				 ret);
3690 			return;
3691 		} else if (req->all_vf_ready) {
3692 			return;
3693 		}
3694 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3695 		hclge_cmd_reuse_desc(&desc, true);
3696 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3697 
3698 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3699 }
3700 
3701 void hclge_report_hw_error(struct hclge_dev *hdev,
3702 			   enum hnae3_hw_error_type type)
3703 {
3704 	struct hnae3_client *client = hdev->nic_client;
3705 
3706 	if (!client || !client->ops->process_hw_error ||
3707 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3708 		return;
3709 
3710 	client->ops->process_hw_error(&hdev->vport[0].nic, type);
3711 }
3712 
3713 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3714 {
3715 	u32 reg_val;
3716 
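	/* report IMP RD poison and CMDQ ECC errors flagged in the PF
	 * other-interrupt source register, then clear the handled bits
	 */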
3717 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3718 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3719 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3720 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3721 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3722 	}
3723 
3724 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3725 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3726 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3727 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3728 	}
3729 }
3730 
3731 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3732 {
3733 	struct hclge_desc desc;
3734 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3735 	int ret;
3736 
3737 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3738 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3739 	req->fun_reset_vfid = func_id;
3740 
3741 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3742 	if (ret)
3743 		dev_err(&hdev->pdev->dev,
3744 			"send function reset cmd fail, status = %d\n", ret);
3745 
3746 	return ret;
3747 }
3748 
3749 static void hclge_do_reset(struct hclge_dev *hdev)
3750 {
3751 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3752 	struct pci_dev *pdev = hdev->pdev;
3753 	u32 val;
3754 
3755 	if (hclge_get_hw_reset_stat(handle)) {
3756 		dev_info(&pdev->dev, "hardware reset not finish\n");
3757 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3758 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3759 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3760 		return;
3761 	}
3762 
3763 	switch (hdev->reset_type) {
3764 	case HNAE3_GLOBAL_RESET:
3765 		dev_info(&pdev->dev, "global reset requested\n");
3766 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3767 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3768 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3769 		break;
3770 	case HNAE3_FUNC_RESET:
3771 		dev_info(&pdev->dev, "PF reset requested\n");
3772 		/* schedule again to check later */
3773 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3774 		hclge_reset_task_schedule(hdev);
3775 		break;
3776 	default:
3777 		dev_warn(&pdev->dev,
3778 			 "unsupported reset type: %d\n", hdev->reset_type);
3779 		break;
3780 	}
3781 }
3782 
3783 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3784 						   unsigned long *addr)
3785 {
3786 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3787 	struct hclge_dev *hdev = ae_dev->priv;
3788 
3789 	/* first, resolve any unknown reset type to the known type(s) */
3790 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3791 		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3792 					HCLGE_MISC_VECTOR_INT_STS);
3793 		/* we will intentionally ignore any errors from this function
3794 		 * as we will end up in *some* reset request in any case
3795 		 */
3796 		if (hclge_handle_hw_msix_error(hdev, addr))
3797 			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3798 				 msix_sts_reg);
3799 
3800 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
3801 		/* We deferred the clearing of the error event which caused the
3802 		 * interrupt since it was not possible to do that in
3803 		 * interrupt context (and this is the reason we introduced the
3804 		 * new UNKNOWN reset type). Now that the errors have been
3805 		 * handled and cleared in hardware, we can safely enable
3806 		 * interrupts. This is an exception to the norm.
3807 		 */
3808 		hclge_enable_vector(&hdev->misc_vector, true);
3809 	}
3810 
3811 	/* return the highest priority reset level amongst all */
3812 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3813 		rst_level = HNAE3_IMP_RESET;
3814 		clear_bit(HNAE3_IMP_RESET, addr);
3815 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3816 		clear_bit(HNAE3_FUNC_RESET, addr);
3817 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3818 		rst_level = HNAE3_GLOBAL_RESET;
3819 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3820 		clear_bit(HNAE3_FUNC_RESET, addr);
3821 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3822 		rst_level = HNAE3_FUNC_RESET;
3823 		clear_bit(HNAE3_FUNC_RESET, addr);
3824 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3825 		rst_level = HNAE3_FLR_RESET;
3826 		clear_bit(HNAE3_FLR_RESET, addr);
3827 	}
3828 
3829 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3830 	    rst_level < hdev->reset_type)
3831 		return HNAE3_NONE_RESET;
3832 
3833 	return rst_level;
3834 }
3835 
3836 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3837 {
3838 	u32 clearval = 0;
3839 
3840 	switch (hdev->reset_type) {
3841 	case HNAE3_IMP_RESET:
3842 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3843 		break;
3844 	case HNAE3_GLOBAL_RESET:
3845 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3846 		break;
3847 	default:
3848 		break;
3849 	}
3850 
3851 	if (!clearval)
3852 		return;
3853 
3854 	/* For revision 0x20, the reset interrupt source
3855 	 * can only be cleared after the hardware reset is done
3856 	 */
3857 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3858 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3859 				clearval);
3860 
3861 	hclge_enable_vector(&hdev->misc_vector, true);
3862 }
3863 
3864 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3865 {
3866 	u32 reg_val;
3867 
3868 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3869 	if (enable)
3870 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3871 	else
3872 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3873 
3874 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3875 }
3876 
3877 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3878 {
3879 	int ret;
3880 
3881 	ret = hclge_set_all_vf_rst(hdev, true);
3882 	if (ret)
3883 		return ret;
3884 
3885 	hclge_func_reset_sync_vf(hdev);
3886 
3887 	return 0;
3888 }
3889 
3890 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3891 {
3892 	u32 reg_val;
3893 	int ret = 0;
3894 
3895 	switch (hdev->reset_type) {
3896 	case HNAE3_FUNC_RESET:
3897 		ret = hclge_func_reset_notify_vf(hdev);
3898 		if (ret)
3899 			return ret;
3900 
3901 		ret = hclge_func_reset_cmd(hdev, 0);
3902 		if (ret) {
3903 			dev_err(&hdev->pdev->dev,
3904 				"asserting function reset fail %d!\n", ret);
3905 			return ret;
3906 		}
3907 
3908 		/* After performing PF reset, it is not necessary to do the
3909 		 * mailbox handling or send any command to firmware, because
3910 		 * any mailbox handling or command to firmware is only valid
3911 		 * after hclge_cmd_init is called.
3912 		 */
3913 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3914 		hdev->rst_stats.pf_rst_cnt++;
3915 		break;
3916 	case HNAE3_FLR_RESET:
3917 		ret = hclge_func_reset_notify_vf(hdev);
3918 		if (ret)
3919 			return ret;
3920 		break;
3921 	case HNAE3_IMP_RESET:
3922 		hclge_handle_imp_error(hdev);
3923 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3924 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3925 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3926 		break;
3927 	default:
3928 		break;
3929 	}
3930 
3931 	/* inform hardware that preparatory work is done */
3932 	msleep(HCLGE_RESET_SYNC_TIME);
3933 	hclge_reset_handshake(hdev, true);
3934 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3935 
3936 	return ret;
3937 }
3938 
3939 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3940 {
3941 #define MAX_RESET_FAIL_CNT 5
3942 
3943 	if (hdev->reset_pending) {
3944 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3945 			 hdev->reset_pending);
3946 		return true;
3947 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3948 		   HCLGE_RESET_INT_M) {
3949 		dev_info(&hdev->pdev->dev,
3950 			 "reset failed because new reset interrupt\n");
3951 		hclge_clear_reset_cause(hdev);
3952 		return false;
3953 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3954 		hdev->rst_stats.reset_fail_cnt++;
3955 		set_bit(hdev->reset_type, &hdev->reset_pending);
3956 		dev_info(&hdev->pdev->dev,
3957 			 "re-schedule reset task(%u)\n",
3958 			 hdev->rst_stats.reset_fail_cnt);
3959 		return true;
3960 	}
3961 
3962 	hclge_clear_reset_cause(hdev);
3963 
3964 	/* recover the handshake status when reset fails */
3965 	hclge_reset_handshake(hdev, true);
3966 
3967 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3968 
3969 	hclge_dbg_dump_rst_info(hdev);
3970 
3971 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3972 
3973 	return false;
3974 }
3975 
3976 static void hclge_update_reset_level(struct hclge_dev *hdev)
3977 {
3978 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3979 	enum hnae3_reset_type reset_level;
3980 
3981 	/* the reset request will not be set during reset, so clear the
3982 	 * pending reset request to avoid an unnecessary reset caused
3983 	 * by the same reason.
3984 	 */
3985 	hclge_get_reset_level(ae_dev, &hdev->reset_request);
3986 
3987 	/* if default_reset_request has a higher level reset request,
3988 	 * it should be handled as soon as possible, since some errors
3989 	 * need this kind of reset to be fixed.
3990 	 */
3991 	reset_level = hclge_get_reset_level(ae_dev,
3992 					    &hdev->default_reset_request);
3993 	if (reset_level != HNAE3_NONE_RESET)
3994 		set_bit(reset_level, &hdev->reset_request);
3995 }
3996 
3997 static int hclge_set_rst_done(struct hclge_dev *hdev)
3998 {
3999 	struct hclge_pf_rst_done_cmd *req;
4000 	struct hclge_desc desc;
4001 	int ret;
4002 
4003 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
4004 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4005 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4006 
4007 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4008 	/* To be compatible with the old firmware, which does not support
4009 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4010 	 * return success
4011 	 */
4012 	if (ret == -EOPNOTSUPP) {
4013 		dev_warn(&hdev->pdev->dev,
4014 			 "current firmware does not support command(0x%x)!\n",
4015 			 HCLGE_OPC_PF_RST_DONE);
4016 		return 0;
4017 	} else if (ret) {
4018 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4019 			ret);
4020 	}
4021 
4022 	return ret;
4023 }
4024 
4025 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4026 {
4027 	int ret = 0;
4028 
4029 	switch (hdev->reset_type) {
4030 	case HNAE3_FUNC_RESET:
4031 	case HNAE3_FLR_RESET:
4032 		ret = hclge_set_all_vf_rst(hdev, false);
4033 		break;
4034 	case HNAE3_GLOBAL_RESET:
4035 	case HNAE3_IMP_RESET:
4036 		ret = hclge_set_rst_done(hdev);
4037 		break;
4038 	default:
4039 		break;
4040 	}
4041 
4042 	/* clear up the handshake status after re-initialization is done */
4043 	hclge_reset_handshake(hdev, false);
4044 
4045 	return ret;
4046 }
4047 
4048 static int hclge_reset_stack(struct hclge_dev *hdev)
4049 {
4050 	int ret;
4051 
4052 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4053 	if (ret)
4054 		return ret;
4055 
4056 	ret = hclge_reset_ae_dev(hdev->ae_dev);
4057 	if (ret)
4058 		return ret;
4059 
4060 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4061 }
4062 
4063 static int hclge_reset_prepare(struct hclge_dev *hdev)
4064 {
4065 	int ret;
4066 
4067 	hdev->rst_stats.reset_cnt++;
4068 	/* perform reset of the stack & ae device for a client */
4069 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4070 	if (ret)
4071 		return ret;
4072 
4073 	rtnl_lock();
4074 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4075 	rtnl_unlock();
4076 	if (ret)
4077 		return ret;
4078 
4079 	return hclge_reset_prepare_wait(hdev);
4080 }
4081 
4082 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4083 {
4084 	int ret;
4085 
4086 	hdev->rst_stats.hw_reset_done_cnt++;
4087 
4088 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4089 	if (ret)
4090 		return ret;
4091 
4092 	rtnl_lock();
4093 	ret = hclge_reset_stack(hdev);
4094 	rtnl_unlock();
4095 	if (ret)
4096 		return ret;
4097 
4098 	hclge_clear_reset_cause(hdev);
4099 
4100 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4101 	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
4102 	 * times
4103 	 */
4104 	if (ret &&
4105 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4106 		return ret;
4107 
4108 	ret = hclge_reset_prepare_up(hdev);
4109 	if (ret)
4110 		return ret;
4111 
4112 	rtnl_lock();
4113 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4114 	rtnl_unlock();
4115 	if (ret)
4116 		return ret;
4117 
4118 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4119 	if (ret)
4120 		return ret;
4121 
4122 	hdev->last_reset_time = jiffies;
4123 	hdev->rst_stats.reset_fail_cnt = 0;
4124 	hdev->rst_stats.reset_done_cnt++;
4125 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4126 
4127 	hclge_update_reset_level(hdev);
4128 
4129 	return 0;
4130 }
4131 
4132 static void hclge_reset(struct hclge_dev *hdev)
4133 {
4134 	if (hclge_reset_prepare(hdev))
4135 		goto err_reset;
4136 
4137 	if (hclge_reset_wait(hdev))
4138 		goto err_reset;
4139 
4140 	if (hclge_reset_rebuild(hdev))
4141 		goto err_reset;
4142 
4143 	return;
4144 
4145 err_reset:
4146 	if (hclge_reset_err_handle(hdev))
4147 		hclge_reset_task_schedule(hdev);
4148 }
4149 
4150 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4151 {
4152 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4153 	struct hclge_dev *hdev = ae_dev->priv;
4154 
4155 	/* We might end up getting called broadly because of the 2 cases below:
4156 	 * 1. A recoverable error was conveyed through APEI and the only way
4157 	 *    to restore normalcy is to reset.
4158 	 * 2. A new reset request from the stack due to timeout.
4159 	 *
4160 	 * Check whether this is a new reset request and we are not here just
4161 	 * because the last reset attempt did not succeed and the watchdog hit
4162 	 * us again. We know it is a new request if the last reset request did
4163 	 * not occur very recently (watchdog timer = 5 * HZ, so check after a
4164 	 * sufficiently long time, say 4 * 5 * HZ). For a new request we reset
4165 	 * the "reset level" to PF reset. And if it is a repeat of the most
4166 	 * recent request, we want to throttle it, so we do not allow it again
4167 	 * before 3 * HZ has elapsed.
4168 	 */
4169 
4170 	if (time_before(jiffies, (hdev->last_reset_time +
4171 				  HCLGE_RESET_INTERVAL))) {
4172 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4173 		return;
4174 	}
4175 
4176 	if (hdev->default_reset_request) {
4177 		hdev->reset_level =
4178 			hclge_get_reset_level(ae_dev,
4179 					      &hdev->default_reset_request);
4180 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4181 		hdev->reset_level = HNAE3_FUNC_RESET;
4182 	}
4183 
4184 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4185 		 hdev->reset_level);
4186 
4187 	/* request reset & schedule reset task */
4188 	set_bit(hdev->reset_level, &hdev->reset_request);
4189 	hclge_reset_task_schedule(hdev);
4190 
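	/* raise the reset level for a possible next request, capped at
	 * HNAE3_GLOBAL_RESET
	 */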
4191 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4192 		hdev->reset_level++;
4193 }
4194 
4195 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4196 					enum hnae3_reset_type rst_type)
4197 {
4198 	struct hclge_dev *hdev = ae_dev->priv;
4199 
4200 	set_bit(rst_type, &hdev->default_reset_request);
4201 }
4202 
4203 static void hclge_reset_timer(struct timer_list *t)
4204 {
4205 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4206 
4207 	/* if default_reset_request has no value, it means that this reset
4208 	 * request has already been handled, so just return here
4209 	 */
4210 	if (!hdev->default_reset_request)
4211 		return;
4212 
4213 	dev_info(&hdev->pdev->dev,
4214 		 "triggering reset in reset timer\n");
4215 	hclge_reset_event(hdev->pdev, NULL);
4216 }
4217 
4218 static void hclge_reset_subtask(struct hclge_dev *hdev)
4219 {
4220 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4221 
4222 	/* check if there is any ongoing reset in the hardware. This status
4223 	 * can be checked from reset_pending. If there is, we need to wait
4224 	 * for hardware to complete the reset.
4225 	 *    a. If we are able to figure out in reasonable time that the
4226 	 *       hardware has fully reset, we can proceed with the driver and
4227 	 *       client reset.
4228 	 *    b. else, we can come back later to check this status, so
4229 	 *       reschedule now.
4230 	 */
4231 	hdev->last_reset_time = jiffies;
4232 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4233 	if (hdev->reset_type != HNAE3_NONE_RESET)
4234 		hclge_reset(hdev);
4235 
4236 	/* check if we got any *new* reset requests to be honored */
4237 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4238 	if (hdev->reset_type != HNAE3_NONE_RESET)
4239 		hclge_do_reset(hdev);
4240 
4241 	hdev->reset_type = HNAE3_NONE_RESET;
4242 }
4243 
4244 static void hclge_reset_service_task(struct hclge_dev *hdev)
4245 {
4246 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4247 		return;
4248 
4249 	down(&hdev->reset_sem);
4250 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4251 
4252 	hclge_reset_subtask(hdev);
4253 
4254 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4255 	up(&hdev->reset_sem);
4256 }
4257 
4258 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4259 {
4260 	int i;
4261 
4262 	/* start from vport 1, since the PF (vport 0) is always alive */
4263 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4264 		struct hclge_vport *vport = &hdev->vport[i];
4265 
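		/* a VF is treated as no longer alive if it has not been
		 * active for more than 8 seconds
		 */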
4266 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4267 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4268 
4269 		/* If vf is not alive, set to default value */
4270 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4271 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4272 	}
4273 }
4274 
4275 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4276 {
4277 	unsigned long delta = round_jiffies_relative(HZ);
4278 
4279 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4280 		return;
4281 
4282 	/* Always handle the link updating to make sure the link state is
4283 	 * updated when it is triggered by mbx.
4284 	 */
4285 	hclge_update_link_status(hdev);
4286 	hclge_sync_mac_table(hdev);
4287 	hclge_sync_promisc_mode(hdev);
4288 	hclge_sync_fd_table(hdev);
4289 
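	/* if less than one second has passed since the last full pass,
	 * skip it and just reschedule for the remaining time
	 */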
4290 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4291 		delta = jiffies - hdev->last_serv_processed;
4292 
4293 		if (delta < round_jiffies_relative(HZ)) {
4294 			delta = round_jiffies_relative(HZ) - delta;
4295 			goto out;
4296 		}
4297 	}
4298 
4299 	hdev->serv_processed_cnt++;
4300 	hclge_update_vport_alive(hdev);
4301 
4302 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4303 		hdev->last_serv_processed = jiffies;
4304 		goto out;
4305 	}
4306 
4307 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4308 		hclge_update_stats_for_all(hdev);
4309 
4310 	hclge_update_port_info(hdev);
4311 	hclge_sync_vlan_filter(hdev);
4312 
4313 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4314 		hclge_rfs_filter_expire(hdev);
4315 
4316 	hdev->last_serv_processed = jiffies;
4317 
4318 out:
4319 	hclge_task_schedule(hdev, delta);
4320 }
4321 
4322 static void hclge_service_task(struct work_struct *work)
4323 {
4324 	struct hclge_dev *hdev =
4325 		container_of(work, struct hclge_dev, service_task.work);
4326 
4327 	hclge_reset_service_task(hdev);
4328 	hclge_mailbox_service_task(hdev);
4329 	hclge_periodic_service_task(hdev);
4330 
4331 	/* Handle reset and mbx again in case periodical task delays the
4332 	 * handling by calling hclge_task_schedule() in
4333 	 * hclge_periodic_service_task().
4334 	 */
4335 	hclge_reset_service_task(hdev);
4336 	hclge_mailbox_service_task(hdev);
4337 }
4338 
4339 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4340 {
4341 	/* VF handle has no client */
4342 	if (!handle->client)
4343 		return container_of(handle, struct hclge_vport, nic);
4344 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4345 		return container_of(handle, struct hclge_vport, roce);
4346 	else
4347 		return container_of(handle, struct hclge_vport, nic);
4348 }
4349 
4350 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4351 				  struct hnae3_vector_info *vector_info)
4352 {
4353 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64
4354 
4355 	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4356 
4357 	/* need an extended offset to configure vectors >= 64 */
4358 	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4359 		vector_info->io_addr = hdev->hw.io_base +
4360 				HCLGE_VECTOR_REG_BASE +
4361 				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4362 	else
4363 		vector_info->io_addr = hdev->hw.io_base +
4364 				HCLGE_VECTOR_EXT_REG_BASE +
4365 				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4366 				HCLGE_VECTOR_REG_OFFSET_H +
4367 				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4368 				HCLGE_VECTOR_REG_OFFSET;
4369 
4370 	hdev->vector_status[idx] = hdev->vport[0].vport_id;
4371 	hdev->vector_irq[idx] = vector_info->vector;
4372 }
4373 
4374 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4375 			    struct hnae3_vector_info *vector_info)
4376 {
4377 	struct hclge_vport *vport = hclge_get_vport(handle);
4378 	struct hnae3_vector_info *vector = vector_info;
4379 	struct hclge_dev *hdev = vport->back;
4380 	int alloc = 0;
4381 	u16 i = 0;
4382 	u16 j;
4383 
4384 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4385 	vector_num = min(hdev->num_msi_left, vector_num);
4386 
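	/* scan the vector table for free entries and hand out up to
	 * vector_num of them, recording the owning vport for each
	 */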
4387 	for (j = 0; j < vector_num; j++) {
4388 		while (++i < hdev->num_nic_msi) {
4389 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4390 				hclge_get_vector_info(hdev, i, vector);
4391 				vector++;
4392 				alloc++;
4393 
4394 				break;
4395 			}
4396 		}
4397 	}
4398 	hdev->num_msi_left -= alloc;
4399 	hdev->num_msi_used += alloc;
4400 
4401 	return alloc;
4402 }
4403 
4404 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4405 {
4406 	int i;
4407 
4408 	for (i = 0; i < hdev->num_msi; i++)
4409 		if (vector == hdev->vector_irq[i])
4410 			return i;
4411 
4412 	return -EINVAL;
4413 }
4414 
4415 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4416 {
4417 	struct hclge_vport *vport = hclge_get_vport(handle);
4418 	struct hclge_dev *hdev = vport->back;
4419 	int vector_id;
4420 
4421 	vector_id = hclge_get_vector_index(hdev, vector);
4422 	if (vector_id < 0) {
4423 		dev_err(&hdev->pdev->dev,
4424 			"Get vector index fail. vector = %d\n", vector);
4425 		return vector_id;
4426 	}
4427 
4428 	hclge_free_vector(hdev, vector_id);
4429 
4430 	return 0;
4431 }
4432 
4433 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4434 {
4435 	return HCLGE_RSS_KEY_SIZE;
4436 }
4437 
4438 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4439 				  const u8 hfunc, const u8 *key)
4440 {
4441 	struct hclge_rss_config_cmd *req;
4442 	unsigned int key_offset = 0;
4443 	struct hclge_desc desc;
4444 	int key_counts;
4445 	int key_size;
4446 	int ret;
4447 
4448 	key_counts = HCLGE_RSS_KEY_SIZE;
4449 	req = (struct hclge_rss_config_cmd *)desc.data;
4450 
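	/* the hash key is programmed in chunks of up to
	 * HCLGE_RSS_HASH_KEY_NUM bytes, one descriptor per chunk, with the
	 * chunk index carried in the hash_config field
	 */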
4451 	while (key_counts) {
4452 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4453 					   false);
4454 
4455 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4456 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4457 
4458 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4459 		memcpy(req->hash_key,
4460 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4461 
4462 		key_counts -= key_size;
4463 		key_offset++;
4464 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4465 		if (ret) {
4466 			dev_err(&hdev->pdev->dev,
4467 				"Configure RSS config fail, status = %d\n",
4468 				ret);
4469 			return ret;
4470 		}
4471 	}
4472 	return 0;
4473 }
4474 
4475 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4476 {
4477 	struct hclge_rss_indirection_table_cmd *req;
4478 	struct hclge_desc desc;
4479 	int rss_cfg_tbl_num;
4480 	u8 rss_msb_oft;
4481 	u8 rss_msb_val;
4482 	int ret;
4483 	u16 qid;
4484 	int i;
4485 	u32 j;
4486 
4487 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4488 	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4489 			  HCLGE_RSS_CFG_TBL_SIZE;
4490 
4491 	for (i = 0; i < rss_cfg_tbl_num; i++) {
4492 		hclge_cmd_setup_basic_desc
4493 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4494 
4495 		req->start_table_index =
4496 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4497 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4498 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
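			/* the low 8 bits of each queue id go into rss_qid_l,
			 * the next higher bit is packed into the rss_qid_h
			 * bitmap
			 */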
4499 			qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4500 			req->rss_qid_l[j] = qid & 0xff;
4501 			rss_msb_oft =
4502 				j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4503 			rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4504 				(j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4505 			req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4506 		}
4507 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4508 		if (ret) {
4509 			dev_err(&hdev->pdev->dev,
4510 				"Configure rss indir table fail, status = %d\n",
4511 				ret);
4512 			return ret;
4513 		}
4514 	}
4515 	return 0;
4516 }
4517 
4518 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4519 				 u16 *tc_size, u16 *tc_offset)
4520 {
4521 	struct hclge_rss_tc_mode_cmd *req;
4522 	struct hclge_desc desc;
4523 	int ret;
4524 	int i;
4525 
4526 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4527 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4528 
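	/* pack each TC's valid bit, size (with its MSB stored separately)
	 * and queue offset into a 16-bit mode word per TC
	 */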
4529 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4530 		u16 mode = 0;
4531 
4532 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4533 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4534 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4535 		hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4536 			      tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4537 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4538 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4539 
4540 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4541 	}
4542 
4543 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4544 	if (ret)
4545 		dev_err(&hdev->pdev->dev,
4546 			"Configure rss tc mode fail, status = %d\n", ret);
4547 
4548 	return ret;
4549 }
4550 
4551 static void hclge_get_rss_type(struct hclge_vport *vport)
4552 {
4553 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4554 	    vport->rss_tuple_sets.ipv4_udp_en ||
4555 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4556 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4557 	    vport->rss_tuple_sets.ipv6_udp_en ||
4558 	    vport->rss_tuple_sets.ipv6_sctp_en)
4559 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4560 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4561 		 vport->rss_tuple_sets.ipv6_fragment_en)
4562 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4563 	else
4564 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4565 }
4566 
4567 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4568 {
4569 	struct hclge_rss_input_tuple_cmd *req;
4570 	struct hclge_desc desc;
4571 	int ret;
4572 
4573 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4574 
4575 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4576 
4577 	/* Get the tuple cfg from the PF */
4578 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4579 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4580 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4581 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4582 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4583 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4584 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4585 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4586 	hclge_get_rss_type(&hdev->vport[0]);
4587 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4588 	if (ret)
4589 		dev_err(&hdev->pdev->dev,
4590 			"Configure rss input fail, status = %d\n", ret);
4591 	return ret;
4592 }
4593 
4594 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4595 			 u8 *key, u8 *hfunc)
4596 {
4597 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4598 	struct hclge_vport *vport = hclge_get_vport(handle);
4599 	int i;
4600 
4601 	/* Get hash algorithm */
4602 	if (hfunc) {
4603 		switch (vport->rss_algo) {
4604 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4605 			*hfunc = ETH_RSS_HASH_TOP;
4606 			break;
4607 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4608 			*hfunc = ETH_RSS_HASH_XOR;
4609 			break;
4610 		default:
4611 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4612 			break;
4613 		}
4614 	}
4615 
4616 	/* Get the RSS Key required by the user */
4617 	if (key)
4618 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4619 
4620 	/* Get indirect table */
4621 	if (indir)
4622 		for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4623 			indir[i] =  vport->rss_indirection_tbl[i];
4624 
4625 	return 0;
4626 }
4627 
4628 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4629 			 const  u8 *key, const  u8 hfunc)
4630 {
4631 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4632 	struct hclge_vport *vport = hclge_get_vport(handle);
4633 	struct hclge_dev *hdev = vport->back;
4634 	u8 hash_algo;
4635 	int ret, i;
4636 
4637 	/* Set the RSS Hash Key if specified by the user */
4638 	if (key) {
4639 		switch (hfunc) {
4640 		case ETH_RSS_HASH_TOP:
4641 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4642 			break;
4643 		case ETH_RSS_HASH_XOR:
4644 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4645 			break;
4646 		case ETH_RSS_HASH_NO_CHANGE:
4647 			hash_algo = vport->rss_algo;
4648 			break;
4649 		default:
4650 			return -EINVAL;
4651 		}
4652 
4653 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4654 		if (ret)
4655 			return ret;
4656 
4657 		/* Update the shadow RSS key with the user specified key */
4658 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4659 		vport->rss_algo = hash_algo;
4660 	}
4661 
4662 	/* Update the shadow RSS table with user specified qids */
4663 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4664 		vport->rss_indirection_tbl[i] = indir[i];
4665 
4666 	/* Update the hardware */
4667 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4668 }
4669 
4670 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4671 {
4672 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4673 
4674 	if (nfc->data & RXH_L4_B_2_3)
4675 		hash_sets |= HCLGE_D_PORT_BIT;
4676 	else
4677 		hash_sets &= ~HCLGE_D_PORT_BIT;
4678 
4679 	if (nfc->data & RXH_IP_SRC)
4680 		hash_sets |= HCLGE_S_IP_BIT;
4681 	else
4682 		hash_sets &= ~HCLGE_S_IP_BIT;
4683 
4684 	if (nfc->data & RXH_IP_DST)
4685 		hash_sets |= HCLGE_D_IP_BIT;
4686 	else
4687 		hash_sets &= ~HCLGE_D_IP_BIT;
4688 
4689 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4690 		hash_sets |= HCLGE_V_TAG_BIT;
4691 
4692 	return hash_sets;
4693 }
4694 
4695 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4696 				    struct ethtool_rxnfc *nfc,
4697 				    struct hclge_rss_input_tuple_cmd *req)
4698 {
4699 	struct hclge_dev *hdev = vport->back;
4700 	u8 tuple_sets;
4701 
4702 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4703 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4704 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4705 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4706 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4707 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4708 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4709 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4710 
4711 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4712 	switch (nfc->flow_type) {
4713 	case TCP_V4_FLOW:
4714 		req->ipv4_tcp_en = tuple_sets;
4715 		break;
4716 	case TCP_V6_FLOW:
4717 		req->ipv6_tcp_en = tuple_sets;
4718 		break;
4719 	case UDP_V4_FLOW:
4720 		req->ipv4_udp_en = tuple_sets;
4721 		break;
4722 	case UDP_V6_FLOW:
4723 		req->ipv6_udp_en = tuple_sets;
4724 		break;
4725 	case SCTP_V4_FLOW:
4726 		req->ipv4_sctp_en = tuple_sets;
4727 		break;
4728 	case SCTP_V6_FLOW:
4729 		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4730 		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4731 			return -EINVAL;
4732 
4733 		req->ipv6_sctp_en = tuple_sets;
4734 		break;
4735 	case IPV4_FLOW:
4736 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4737 		break;
4738 	case IPV6_FLOW:
4739 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4740 		break;
4741 	default:
4742 		return -EINVAL;
4743 	}
4744 
4745 	return 0;
4746 }
4747 
4748 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4749 			       struct ethtool_rxnfc *nfc)
4750 {
4751 	struct hclge_vport *vport = hclge_get_vport(handle);
4752 	struct hclge_dev *hdev = vport->back;
4753 	struct hclge_rss_input_tuple_cmd *req;
4754 	struct hclge_desc desc;
4755 	int ret;
4756 
4757 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4758 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4759 		return -EINVAL;
4760 
4761 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4762 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4763 
4764 	ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4765 	if (ret) {
4766 		dev_err(&hdev->pdev->dev,
4767 			"failed to init rss tuple cmd, ret = %d\n", ret);
4768 		return ret;
4769 	}
4770 
4771 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4772 	if (ret) {
4773 		dev_err(&hdev->pdev->dev,
4774 			"Set rss tuple fail, status = %d\n", ret);
4775 		return ret;
4776 	}
4777 
4778 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4779 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4780 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4781 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4782 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4783 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4784 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4785 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4786 	hclge_get_rss_type(vport);
4787 	return 0;
4788 }
4789 
4790 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4791 				     u8 *tuple_sets)
4792 {
4793 	switch (flow_type) {
4794 	case TCP_V4_FLOW:
4795 		*tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4796 		break;
4797 	case UDP_V4_FLOW:
4798 		*tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4799 		break;
4800 	case TCP_V6_FLOW:
4801 		*tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4802 		break;
4803 	case UDP_V6_FLOW:
4804 		*tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4805 		break;
4806 	case SCTP_V4_FLOW:
4807 		*tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4808 		break;
4809 	case SCTP_V6_FLOW:
4810 		*tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4811 		break;
4812 	case IPV4_FLOW:
4813 	case IPV6_FLOW:
4814 		*tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4815 		break;
4816 	default:
4817 		return -EINVAL;
4818 	}
4819 
4820 	return 0;
4821 }
4822 
4823 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4824 {
4825 	u64 tuple_data = 0;
4826 
4827 	if (tuple_sets & HCLGE_D_PORT_BIT)
4828 		tuple_data |= RXH_L4_B_2_3;
4829 	if (tuple_sets & HCLGE_S_PORT_BIT)
4830 		tuple_data |= RXH_L4_B_0_1;
4831 	if (tuple_sets & HCLGE_D_IP_BIT)
4832 		tuple_data |= RXH_IP_DST;
4833 	if (tuple_sets & HCLGE_S_IP_BIT)
4834 		tuple_data |= RXH_IP_SRC;
4835 
4836 	return tuple_data;
4837 }
4838 
4839 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4840 			       struct ethtool_rxnfc *nfc)
4841 {
4842 	struct hclge_vport *vport = hclge_get_vport(handle);
4843 	u8 tuple_sets;
4844 	int ret;
4845 
4846 	nfc->data = 0;
4847 
4848 	ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4849 	if (ret || !tuple_sets)
4850 		return ret;
4851 
4852 	nfc->data = hclge_convert_rss_tuple(tuple_sets);
4853 
4854 	return 0;
4855 }
4856 
4857 static int hclge_get_tc_size(struct hnae3_handle *handle)
4858 {
4859 	struct hclge_vport *vport = hclge_get_vport(handle);
4860 	struct hclge_dev *hdev = vport->back;
4861 
4862 	return hdev->pf_rss_size_max;
4863 }
4864 
4865 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4866 {
4867 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4868 	struct hclge_vport *vport = hdev->vport;
4869 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4870 	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4871 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4872 	struct hnae3_tc_info *tc_info;
4873 	u16 roundup_size;
4874 	u16 rss_size;
4875 	int i;
4876 
4877 	tc_info = &vport->nic.kinfo.tc_info;
4878 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4879 		rss_size = tc_info->tqp_count[i];
4880 		tc_valid[i] = 0;
4881 
4882 		if (!(hdev->hw_tc_map & BIT(i)))
4883 			continue;
4884 
4885 		/* tc_size set to hardware is the log2 of the roundup power of
4886 		 * two of rss_size, the actual queue size is limited by the
4887 		 * indirection table.
4888 		 */
4889 		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4890 		    rss_size == 0) {
4891 			dev_err(&hdev->pdev->dev,
4892 				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4893 				rss_size);
4894 			return -EINVAL;
4895 		}
4896 
4897 		roundup_size = roundup_pow_of_two(rss_size);
4898 		roundup_size = ilog2(roundup_size);
4899 
4900 		tc_valid[i] = 1;
4901 		tc_size[i] = roundup_size;
4902 		tc_offset[i] = tc_info->tqp_offset[i];
4903 	}
4904 
4905 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4906 }
4907 
4908 int hclge_rss_init_hw(struct hclge_dev *hdev)
4909 {
4910 	struct hclge_vport *vport = hdev->vport;
4911 	u16 *rss_indir = vport[0].rss_indirection_tbl;
4912 	u8 *key = vport[0].rss_hash_key;
4913 	u8 hfunc = vport[0].rss_algo;
4914 	int ret;
4915 
4916 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4917 	if (ret)
4918 		return ret;
4919 
4920 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4921 	if (ret)
4922 		return ret;
4923 
4924 	ret = hclge_set_rss_input_tuple(hdev);
4925 	if (ret)
4926 		return ret;
4927 
4928 	return hclge_init_rss_tc_mode(hdev);
4929 }
4930 
4931 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4932 {
4933 	struct hclge_vport *vport = &hdev->vport[0];
4934 	int i;
4935 
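	/* e.g. with alloc_rss_size = 8, the table becomes 0, 1, ..., 7, 0, 1, ... */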
4936 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4937 		vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
4938 }
4939 
4940 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4941 {
4942 	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4943 	int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4944 	struct hclge_vport *vport = &hdev->vport[0];
4945 	u16 *rss_ind_tbl;
4946 
4947 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4948 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4949 
4950 	vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4951 	vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4952 	vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
4953 	vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4954 	vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4955 	vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4956 	vport->rss_tuple_sets.ipv6_sctp_en =
4957 		hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4958 		HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4959 		HCLGE_RSS_INPUT_TUPLE_SCTP;
4960 	vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4961 
4962 	vport->rss_algo = rss_algo;
4963 
4964 	rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4965 				   sizeof(*rss_ind_tbl), GFP_KERNEL);
4966 	if (!rss_ind_tbl)
4967 		return -ENOMEM;
4968 
4969 	vport->rss_indirection_tbl = rss_ind_tbl;
4970 	memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
4971 
4972 	hclge_rss_indir_init_cfg(hdev);
4973 
4974 	return 0;
4975 }
4976 
4977 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4978 				int vector_id, bool en,
4979 				struct hnae3_ring_chain_node *ring_chain)
4980 {
4981 	struct hclge_dev *hdev = vport->back;
4982 	struct hnae3_ring_chain_node *node;
4983 	struct hclge_desc desc;
4984 	struct hclge_ctrl_vector_chain_cmd *req =
4985 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4986 	enum hclge_cmd_status status;
4987 	enum hclge_opcode_type op;
4988 	u16 tqp_type_and_id;
4989 	int i;
4990 
4991 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4992 	hclge_cmd_setup_basic_desc(&desc, op, false);
4993 	req->int_vector_id_l = hnae3_get_field(vector_id,
4994 					       HCLGE_VECTOR_ID_L_M,
4995 					       HCLGE_VECTOR_ID_L_S);
4996 	req->int_vector_id_h = hnae3_get_field(vector_id,
4997 					       HCLGE_VECTOR_ID_H_M,
4998 					       HCLGE_VECTOR_ID_H_S);
4999 
5000 	i = 0;
5001 	for (node = ring_chain; node; node = node->next) {
5002 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5003 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
5004 				HCLGE_INT_TYPE_S,
5005 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5006 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5007 				HCLGE_TQP_ID_S, node->tqp_index);
5008 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5009 				HCLGE_INT_GL_IDX_S,
5010 				hnae3_get_field(node->int_gl_idx,
5011 						HNAE3_RING_GL_IDX_M,
5012 						HNAE3_RING_GL_IDX_S));
5013 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5014 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5015 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5016 			req->vfid = vport->vport_id;
5017 
5018 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
5019 			if (status) {
5020 				dev_err(&hdev->pdev->dev,
5021 					"Map TQP fail, status is %d.\n",
5022 					status);
5023 				return -EIO;
5024 			}
5025 			i = 0;
5026 
5027 			hclge_cmd_setup_basic_desc(&desc,
5028 						   op,
5029 						   false);
5030 			req->int_vector_id_l =
5031 				hnae3_get_field(vector_id,
5032 						HCLGE_VECTOR_ID_L_M,
5033 						HCLGE_VECTOR_ID_L_S);
5034 			req->int_vector_id_h =
5035 				hnae3_get_field(vector_id,
5036 						HCLGE_VECTOR_ID_H_M,
5037 						HCLGE_VECTOR_ID_H_S);
5038 		}
5039 	}
5040 
5041 	if (i > 0) {
5042 		req->int_cause_num = i;
5043 		req->vfid = vport->vport_id;
5044 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
5045 		if (status) {
5046 			dev_err(&hdev->pdev->dev,
5047 				"Map TQP fail, status is %d.\n", status);
5048 			return -EIO;
5049 		}
5050 	}
5051 
5052 	return 0;
5053 }
5054 
5055 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5056 				    struct hnae3_ring_chain_node *ring_chain)
5057 {
5058 	struct hclge_vport *vport = hclge_get_vport(handle);
5059 	struct hclge_dev *hdev = vport->back;
5060 	int vector_id;
5061 
5062 	vector_id = hclge_get_vector_index(hdev, vector);
5063 	if (vector_id < 0) {
5064 		dev_err(&hdev->pdev->dev,
5065 			"failed to get vector index. vector=%d\n", vector);
5066 		return vector_id;
5067 	}
5068 
5069 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5070 }
5071 
5072 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5073 				       struct hnae3_ring_chain_node *ring_chain)
5074 {
5075 	struct hclge_vport *vport = hclge_get_vport(handle);
5076 	struct hclge_dev *hdev = vport->back;
5077 	int vector_id, ret;
5078 
5079 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5080 		return 0;
5081 
5082 	vector_id = hclge_get_vector_index(hdev, vector);
5083 	if (vector_id < 0) {
5084 		dev_err(&handle->pdev->dev,
5085 			"Get vector index fail. ret =%d\n", vector_id);
5086 		return vector_id;
5087 	}
5088 
5089 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5090 	if (ret)
5091 		dev_err(&handle->pdev->dev,
5092 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5093 			vector_id, ret);
5094 
5095 	return ret;
5096 }
5097 
5098 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5099 				      bool en_uc, bool en_mc, bool en_bc)
5100 {
5101 	struct hclge_vport *vport = &hdev->vport[vf_id];
5102 	struct hnae3_handle *handle = &vport->nic;
5103 	struct hclge_promisc_cfg_cmd *req;
5104 	struct hclge_desc desc;
5105 	bool uc_tx_en = en_uc;
5106 	u8 promisc_cfg = 0;
5107 	int ret;
5108 
5109 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5110 
5111 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
5112 	req->vf_id = vf_id;
5113 
5114 	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5115 		uc_tx_en = false;
5116 
5117 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5118 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5119 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5120 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5121 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5122 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5123 	req->extend_promisc = promisc_cfg;
5124 
5125 	/* to be compatible with DEVICE_VERSION_V1/2 */
5126 	promisc_cfg = 0;
5127 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5128 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5129 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5130 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5131 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5132 	req->promisc = promisc_cfg;
5133 
5134 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5135 	if (ret)
5136 		dev_err(&hdev->pdev->dev,
5137 			"failed to set vport %u promisc mode, ret = %d.\n",
5138 			vf_id, ret);
5139 
5140 	return ret;
5141 }
5142 
5143 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5144 				 bool en_mc_pmc, bool en_bc_pmc)
5145 {
5146 	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5147 					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
5148 }
5149 
5150 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5151 				  bool en_mc_pmc)
5152 {
5153 	struct hclge_vport *vport = hclge_get_vport(handle);
5154 	struct hclge_dev *hdev = vport->back;
5155 	bool en_bc_pmc = true;
5156 
5157 	/* For devices whose version is below V2, if broadcast promisc is
5158 	 * enabled, the vlan filter is always bypassed. So broadcast promisc
5159 	 * should be disabled until the user enables promisc mode.
5160 	 */
5161 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5162 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5163 
5164 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5165 					    en_bc_pmc);
5166 }
5167 
5168 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5169 {
5170 	struct hclge_vport *vport = hclge_get_vport(handle);
5171 	struct hclge_dev *hdev = vport->back;
5172 
5173 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
5174 }
5175 
5176 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5177 {
5178 	if (hlist_empty(&hdev->fd_rule_list))
5179 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5180 }
5181 
5182 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5183 {
5184 	if (!test_bit(location, hdev->fd_bmap)) {
5185 		set_bit(location, hdev->fd_bmap);
5186 		hdev->hclge_fd_rule_num++;
5187 	}
5188 }
5189 
5190 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5191 {
5192 	if (test_bit(location, hdev->fd_bmap)) {
5193 		clear_bit(location, hdev->fd_bmap);
5194 		hdev->hclge_fd_rule_num--;
5195 	}
5196 }
5197 
5198 static void hclge_fd_free_node(struct hclge_dev *hdev,
5199 			       struct hclge_fd_rule *rule)
5200 {
5201 	hlist_del(&rule->rule_node);
5202 	kfree(rule);
5203 	hclge_sync_fd_state(hdev);
5204 }
5205 
5206 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5207 				      struct hclge_fd_rule *old_rule,
5208 				      struct hclge_fd_rule *new_rule,
5209 				      enum HCLGE_FD_NODE_STATE state)
5210 {
5211 	switch (state) {
5212 	case HCLGE_FD_TO_ADD:
5213 	case HCLGE_FD_ACTIVE:
5214 		/* 1) if the new state is TO_ADD, just replace the old rule
5215 		 * with the same location, no matter its state, because the
5216 		 * new rule will be configured to the hardware.
5217 		 * 2) if the new state is ACTIVE, it means the new rule
5218 		 * has been configured to the hardware, so just replace
5219 		 * the old rule node with the same location.
5220 		 * 3) neither case adds a new node to the list, so it's
5221 		 * unnecessary to update the rule number and fd_bmap.
5222 		 */
5223 		new_rule->rule_node.next = old_rule->rule_node.next;
5224 		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5225 		memcpy(old_rule, new_rule, sizeof(*old_rule));
5226 		kfree(new_rule);
5227 		break;
5228 	case HCLGE_FD_DELETED:
5229 		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5230 		hclge_fd_free_node(hdev, old_rule);
5231 		break;
5232 	case HCLGE_FD_TO_DEL:
5233 		/* if the new request is TO_DEL, and the old rule exists:
5234 		 * 1) the state of the old rule is TO_DEL, we need do nothing,
5235 		 * because we delete the rule by location and the other rule
5236 		 * content is unnecessary.
5237 		 * 2) the state of the old rule is ACTIVE, we need to change its
5238 		 * state to TO_DEL, so the rule will be deleted when the
5239 		 * periodic task is scheduled.
5240 		 * 3) the state of old rule is TO_ADD, it means the rule hasn't
5241 		 * been added to hardware, so we just delete the rule node from
5242 		 * fd_rule_list directly.
5243 		 */
5244 		if (old_rule->state == HCLGE_FD_TO_ADD) {
5245 			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5246 			hclge_fd_free_node(hdev, old_rule);
5247 			return;
5248 		}
5249 		old_rule->state = HCLGE_FD_TO_DEL;
5250 		break;
5251 	}
5252 }
5253 
5254 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5255 						u16 location,
5256 						struct hclge_fd_rule **parent)
5257 {
5258 	struct hclge_fd_rule *rule;
5259 	struct hlist_node *node;
5260 
5261 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5262 		if (rule->location == location)
5263 			return rule;
5264 		else if (rule->location > location)
5265 			return NULL;
5266 		/* record the parent node, used to keep the nodes in
5267 		 * fd_rule_list in ascending order.
5268 		 */
5269 		*parent = rule;
5270 	}
5271 
5272 	return NULL;
5273 }
5274 
5275 /* insert fd rule node in ascending order according to rule->location */
5276 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5277 				      struct hclge_fd_rule *rule,
5278 				      struct hclge_fd_rule *parent)
5279 {
5280 	INIT_HLIST_NODE(&rule->rule_node);
5281 
5282 	if (parent)
5283 		hlist_add_behind(&rule->rule_node, &parent->rule_node);
5284 	else
5285 		hlist_add_head(&rule->rule_node, hlist);
5286 }
5287 
5288 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5289 				     struct hclge_fd_user_def_cfg *cfg)
5290 {
5291 	struct hclge_fd_user_def_cfg_cmd *req;
5292 	struct hclge_desc desc;
5293 	u16 data = 0;
5294 	int ret;
5295 
5296 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5297 
5298 	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5299 
5300 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5301 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5302 			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5303 	req->ol2_cfg = cpu_to_le16(data);
5304 
5305 	data = 0;
5306 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5307 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5308 			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5309 	req->ol3_cfg = cpu_to_le16(data);
5310 
5311 	data = 0;
5312 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5313 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5314 			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5315 	req->ol4_cfg = cpu_to_le16(data);
5316 
5317 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5318 	if (ret)
5319 		dev_err(&hdev->pdev->dev,
5320 			"failed to set fd user def data, ret= %d\n", ret);
5321 	return ret;
5322 }
5323 
5324 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5325 {
5326 	int ret;
5327 
5328 	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5329 		return;
5330 
5331 	if (!locked)
5332 		spin_lock_bh(&hdev->fd_rule_lock);
5333 
5334 	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5335 	if (ret)
5336 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5337 
5338 	if (!locked)
5339 		spin_unlock_bh(&hdev->fd_rule_lock);
5340 }
5341 
5342 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5343 					  struct hclge_fd_rule *rule)
5344 {
5345 	struct hlist_head *hlist = &hdev->fd_rule_list;
5346 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5347 	struct hclge_fd_user_def_info *info, *old_info;
5348 	struct hclge_fd_user_def_cfg *cfg;
5349 
5350 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5351 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5352 		return 0;
5353 
5354 	/* a valid layer starts from 1, so subtract 1 to get the cfg index */
5355 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5356 	info = &rule->ep.user_def;
5357 
5358 	if (!cfg->ref_cnt || cfg->offset == info->offset)
5359 		return 0;
5360 
5361 	if (cfg->ref_cnt > 1)
5362 		goto error;
5363 
5364 	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5365 	if (fd_rule) {
5366 		old_info = &fd_rule->ep.user_def;
5367 		if (info->layer == old_info->layer)
5368 			return 0;
5369 	}
5370 
5371 error:
5372 	dev_err(&hdev->pdev->dev,
5373 		"No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5374 		info->layer + 1);
5375 	return -ENOSPC;
5376 }
5377 
5378 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5379 					 struct hclge_fd_rule *rule)
5380 {
5381 	struct hclge_fd_user_def_cfg *cfg;
5382 
5383 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5384 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5385 		return;
5386 
5387 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5388 	if (!cfg->ref_cnt) {
5389 		cfg->offset = rule->ep.user_def.offset;
5390 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5391 	}
5392 	cfg->ref_cnt++;
5393 }
5394 
5395 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5396 					 struct hclge_fd_rule *rule)
5397 {
5398 	struct hclge_fd_user_def_cfg *cfg;
5399 
5400 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5401 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5402 		return;
5403 
5404 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5405 	if (!cfg->ref_cnt)
5406 		return;
5407 
5408 	cfg->ref_cnt--;
5409 	if (!cfg->ref_cnt) {
5410 		cfg->offset = 0;
5411 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5412 	}
5413 }
5414 
5415 static void hclge_update_fd_list(struct hclge_dev *hdev,
5416 				 enum HCLGE_FD_NODE_STATE state, u16 location,
5417 				 struct hclge_fd_rule *new_rule)
5418 {
5419 	struct hlist_head *hlist = &hdev->fd_rule_list;
5420 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5421 
5422 	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5423 	if (fd_rule) {
5424 		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5425 		if (state == HCLGE_FD_ACTIVE)
5426 			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5427 		hclge_sync_fd_user_def_cfg(hdev, true);
5428 
5429 		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5430 		return;
5431 	}
5432 
5433 	/* it's unlikely to fail here, because we have checked that the rule
5434 	 * exists before.
5435 	 */
5436 	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5437 		dev_warn(&hdev->pdev->dev,
5438 			 "failed to delete fd rule %u, it doesn't exist\n",
5439 			 location);
5440 		return;
5441 	}
5442 
5443 	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5444 	hclge_sync_fd_user_def_cfg(hdev, true);
5445 
5446 	hclge_fd_insert_rule_node(hlist, new_rule, parent);
5447 	hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5448 
5449 	if (state == HCLGE_FD_TO_ADD) {
5450 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5451 		hclge_task_schedule(hdev, 0);
5452 	}
5453 }
5454 
5455 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5456 {
5457 	struct hclge_get_fd_mode_cmd *req;
5458 	struct hclge_desc desc;
5459 	int ret;
5460 
5461 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5462 
5463 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
5464 
5465 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5466 	if (ret) {
5467 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5468 		return ret;
5469 	}
5470 
5471 	*fd_mode = req->mode;
5472 
5473 	return ret;
5474 }
5475 
5476 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5477 				   u32 *stage1_entry_num,
5478 				   u32 *stage2_entry_num,
5479 				   u16 *stage1_counter_num,
5480 				   u16 *stage2_counter_num)
5481 {
5482 	struct hclge_get_fd_allocation_cmd *req;
5483 	struct hclge_desc desc;
5484 	int ret;
5485 
5486 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5487 
5488 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5489 
5490 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5491 	if (ret) {
5492 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5493 			ret);
5494 		return ret;
5495 	}
5496 
5497 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5498 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5499 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5500 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5501 
5502 	return ret;
5503 }
5504 
5505 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5506 				   enum HCLGE_FD_STAGE stage_num)
5507 {
5508 	struct hclge_set_fd_key_config_cmd *req;
5509 	struct hclge_fd_key_cfg *stage;
5510 	struct hclge_desc desc;
5511 	int ret;
5512 
5513 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5514 
5515 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5516 	stage = &hdev->fd_cfg.key_cfg[stage_num];
5517 	req->stage = stage_num;
5518 	req->key_select = stage->key_sel;
5519 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5520 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5521 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5522 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5523 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5524 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5525 
5526 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5527 	if (ret)
5528 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5529 
5530 	return ret;
5531 }
5532 
5533 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5534 {
5535 	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5536 
5537 	spin_lock_bh(&hdev->fd_rule_lock);
5538 	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5539 	spin_unlock_bh(&hdev->fd_rule_lock);
5540 
5541 	hclge_fd_set_user_def_cmd(hdev, cfg);
5542 }
5543 
5544 static int hclge_init_fd_config(struct hclge_dev *hdev)
5545 {
5546 #define LOW_2_WORDS		0x03
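/* bit-per-word enable: 0x03 selects two of the four 32-bit IPv6 address words */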
5547 	struct hclge_fd_key_cfg *key_cfg;
5548 	int ret;
5549 
5550 	if (!hnae3_dev_fd_supported(hdev))
5551 		return 0;
5552 
5553 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5554 	if (ret)
5555 		return ret;
5556 
5557 	switch (hdev->fd_cfg.fd_mode) {
5558 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5559 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5560 		break;
5561 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5562 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5563 		break;
5564 	default:
5565 		dev_err(&hdev->pdev->dev,
5566 			"Unsupported flow director mode %u\n",
5567 			hdev->fd_cfg.fd_mode);
5568 		return -EOPNOTSUPP;
5569 	}
5570 
5571 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5572 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5573 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5574 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5575 	key_cfg->outer_sipv6_word_en = 0;
5576 	key_cfg->outer_dipv6_word_en = 0;
5577 
5578 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5579 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5580 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5581 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5582 
5583 	/* If using the max 400-bit key, we can support tuples of ether type */
5584 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5585 		key_cfg->tuple_active |=
5586 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5587 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5588 			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5589 	}
5590 
5591 	/* roce_type is used to filter roce frames
5592 	 * dst_vport is used to specify the rule
5593 	 */
5594 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5595 
5596 	ret = hclge_get_fd_allocation(hdev,
5597 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5598 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5599 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5600 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5601 	if (ret)
5602 		return ret;
5603 
5604 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5605 }
5606 
5607 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5608 				int loc, u8 *key, bool is_add)
5609 {
5610 	struct hclge_fd_tcam_config_1_cmd *req1;
5611 	struct hclge_fd_tcam_config_2_cmd *req2;
5612 	struct hclge_fd_tcam_config_3_cmd *req3;
5613 	struct hclge_desc desc[3];
5614 	int ret;
5615 
5616 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5617 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5618 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5619 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5620 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5621 
5622 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5623 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5624 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5625 
5626 	req1->stage = stage;
5627 	req1->xy_sel = sel_x ? 1 : 0;
5628 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5629 	req1->index = cpu_to_le32(loc);
5630 	req1->entry_vld = sel_x ? is_add : 0;
5631 
5632 	if (key) {
5633 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5634 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5635 		       sizeof(req2->tcam_data));
5636 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5637 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5638 	}
5639 
5640 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5641 	if (ret)
5642 		dev_err(&hdev->pdev->dev,
5643 			"config tcam key fail, ret=%d\n",
5644 			ret);
5645 
5646 	return ret;
5647 }
5648 
5649 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5650 			      struct hclge_fd_ad_data *action)
5651 {
5652 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5653 	struct hclge_fd_ad_config_cmd *req;
5654 	struct hclge_desc desc;
5655 	u64 ad_data = 0;
5656 	int ret;
5657 
5658 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5659 
5660 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5661 	req->index = cpu_to_le32(loc);
5662 	req->stage = stage;
5663 
5664 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5665 		      action->write_rule_id_to_bd);
5666 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5667 			action->rule_id);
5668 	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5669 		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5670 			      action->override_tc);
5671 		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5672 				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5673 	}
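	/* the fields set above occupy the upper 32 bits of ad_data; shift them
	 * up before filling the lower 32 bits below
	 */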
5674 	ad_data <<= 32;
5675 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5676 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5677 		      action->forward_to_direct_queue);
5678 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5679 			action->queue_id);
5680 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5681 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5682 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5683 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5684 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5685 			action->counter_id);
5686 
5687 	req->ad_data = cpu_to_le64(ad_data);
5688 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5689 	if (ret)
5690 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5691 
5692 	return ret;
5693 }
5694 
5695 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5696 				   struct hclge_fd_rule *rule)
5697 {
5698 	int offset, moffset, ip_offset;
5699 	enum HCLGE_FD_KEY_OPT key_opt;
5700 	u16 tmp_x_s, tmp_y_s;
5701 	u32 tmp_x_l, tmp_y_l;
5702 	u8 *p = (u8 *)rule;
5703 	int i;
5704 
5705 	if (rule->unused_tuple & BIT(tuple_bit))
5706 		return true;
5707 
5708 	key_opt = tuple_key_info[tuple_bit].key_opt;
5709 	offset = tuple_key_info[tuple_bit].offset;
5710 	moffset = tuple_key_info[tuple_bit].moffset;
5711 
5712 	switch (key_opt) {
5713 	case KEY_OPT_U8:
5714 		calc_x(*key_x, p[offset], p[moffset]);
5715 		calc_y(*key_y, p[offset], p[moffset]);
5716 
5717 		return true;
5718 	case KEY_OPT_LE16:
5719 		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5720 		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5721 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5722 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5723 
5724 		return true;
5725 	case KEY_OPT_LE32:
5726 		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5727 		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5728 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5729 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5730 
5731 		return true;
5732 	case KEY_OPT_MAC:
5733 		for (i = 0; i < ETH_ALEN; i++) {
5734 			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5735 			       p[moffset + i]);
5736 			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5737 			       p[moffset + i]);
5738 		}
5739 
5740 		return true;
5741 	case KEY_OPT_IP:
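		/* only the u32 word at IPV4_INDEX of the src/dst ip array is
		 * converted for this tuple
		 */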
5742 		ip_offset = IPV4_INDEX * sizeof(u32);
5743 		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5744 		       *(u32 *)(&p[moffset + ip_offset]));
5745 		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5746 		       *(u32 *)(&p[moffset + ip_offset]));
5747 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5748 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5749 
5750 		return true;
5751 	default:
5752 		return false;
5753 	}
5754 }
5755 
5756 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5757 				 u8 vf_id, u8 network_port_id)
5758 {
5759 	u32 port_number = 0;
5760 
5761 	if (port_type == HOST_PORT) {
5762 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5763 				pf_id);
5764 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5765 				vf_id);
5766 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5767 	} else {
5768 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5769 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5770 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5771 	}
5772 
5773 	return port_number;
5774 }
5775 
5776 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5777 				       __le32 *key_x, __le32 *key_y,
5778 				       struct hclge_fd_rule *rule)
5779 {
5780 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5781 	u8 cur_pos = 0, tuple_size, shift_bits;
5782 	unsigned int i;
5783 
5784 	for (i = 0; i < MAX_META_DATA; i++) {
5785 		tuple_size = meta_data_key_info[i].key_length;
5786 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5787 
5788 		switch (tuple_bit) {
5789 		case BIT(ROCE_TYPE):
5790 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5791 			cur_pos += tuple_size;
5792 			break;
5793 		case BIT(DST_VPORT):
5794 			port_number = hclge_get_port_number(HOST_PORT, 0,
5795 							    rule->vf_id, 0);
5796 			hnae3_set_field(meta_data,
5797 					GENMASK(cur_pos + tuple_size, cur_pos),
5798 					cur_pos, port_number);
5799 			cur_pos += tuple_size;
5800 			break;
5801 		default:
5802 			break;
5803 		}
5804 	}
5805 
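	/* meta data occupies the most significant cur_pos bits of the 32-bit
	 * word, hence the left shift by the number of unused bits below
	 */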
5806 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5807 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5808 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5809 
5810 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5811 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5812 }
5813 
5814 /* A complete key is the combination of the meta data key and the tuple key.
5815  * The meta data key is stored in the MSB region, the tuple key is stored in
5816  * the LSB region, and unused bits are filled with 0.
5817  */
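/* e.g. with the 400-bit key, max_key_length / 8 = 50 bytes in total; the last
 * MAX_META_DATA_LENGTH / 8 bytes hold the meta data key, and tuple keys fill
 * upward from byte 0 (see meta_data_region below).
 */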
5818 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5819 			    struct hclge_fd_rule *rule)
5820 {
5821 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5822 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5823 	u8 *cur_key_x, *cur_key_y;
5824 	u8 meta_data_region;
5825 	u8 tuple_size;
5826 	int ret;
5827 	u32 i;
5828 
5829 	memset(key_x, 0, sizeof(key_x));
5830 	memset(key_y, 0, sizeof(key_y));
5831 	cur_key_x = key_x;
5832 	cur_key_y = key_y;
5833 
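	/* serialize each active tuple into key_x/key_y; cur_key_x/cur_key_y
	 * advance by the tuple size for every tuple actually converted
	 */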
5834 	for (i = 0; i < MAX_TUPLE; i++) {
5835 		bool tuple_valid;
5836 
5837 		tuple_size = tuple_key_info[i].key_length / 8;
5838 		if (!(key_cfg->tuple_active & BIT(i)))
5839 			continue;
5840 
5841 		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5842 						     cur_key_y, rule);
5843 		if (tuple_valid) {
5844 			cur_key_x += tuple_size;
5845 			cur_key_y += tuple_size;
5846 		}
5847 	}
5848 
5849 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5850 			MAX_META_DATA_LENGTH / 8;
5851 
5852 	hclge_fd_convert_meta_data(key_cfg,
5853 				   (__le32 *)(key_x + meta_data_region),
5854 				   (__le32 *)(key_y + meta_data_region),
5855 				   rule);
5856 
5857 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5858 				   true);
5859 	if (ret) {
5860 		dev_err(&hdev->pdev->dev,
5861 			"fd key_y config fail, loc=%u, ret=%d\n",
5862 			rule->location, ret);
5863 		return ret;
5864 	}
5865 
5866 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5867 				   true);
5868 	if (ret)
5869 		dev_err(&hdev->pdev->dev,
5870 			"fd key_x config fail, loc=%u, ret=%d\n",
5871 			rule->location, ret);
5872 	return ret;
5873 }
5874 
5875 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5876 			       struct hclge_fd_rule *rule)
5877 {
5878 	struct hclge_vport *vport = hdev->vport;
5879 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5880 	struct hclge_fd_ad_data ad_data;
5881 
5882 	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5883 	ad_data.ad_id = rule->location;
5884 
5885 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5886 		ad_data.drop_packet = true;
5887 	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5888 		ad_data.override_tc = true;
5889 		ad_data.queue_id =
5890 			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5891 		ad_data.tc_size =
5892 			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5893 	} else {
5894 		ad_data.forward_to_direct_queue = true;
5895 		ad_data.queue_id = rule->queue_id;
5896 	}
5897 
5898 	ad_data.use_counter = false;
5899 	ad_data.counter_id = 0;
5900 
5901 	ad_data.use_next_stage = false;
5902 	ad_data.next_input_key = 0;
5903 
5904 	ad_data.write_rule_id_to_bd = true;
5905 	ad_data.rule_id = rule->location;
5906 
5907 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5908 }
5909 
5910 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5911 				       u32 *unused_tuple)
5912 {
5913 	if (!spec || !unused_tuple)
5914 		return -EINVAL;
5915 
5916 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5917 
5918 	if (!spec->ip4src)
5919 		*unused_tuple |= BIT(INNER_SRC_IP);
5920 
5921 	if (!spec->ip4dst)
5922 		*unused_tuple |= BIT(INNER_DST_IP);
5923 
5924 	if (!spec->psrc)
5925 		*unused_tuple |= BIT(INNER_SRC_PORT);
5926 
5927 	if (!spec->pdst)
5928 		*unused_tuple |= BIT(INNER_DST_PORT);
5929 
5930 	if (!spec->tos)
5931 		*unused_tuple |= BIT(INNER_IP_TOS);
5932 
5933 	return 0;
5934 }
5935 
5936 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5937 				    u32 *unused_tuple)
5938 {
5939 	if (!spec || !unused_tuple)
5940 		return -EINVAL;
5941 
5942 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5943 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5944 
5945 	if (!spec->ip4src)
5946 		*unused_tuple |= BIT(INNER_SRC_IP);
5947 
5948 	if (!spec->ip4dst)
5949 		*unused_tuple |= BIT(INNER_DST_IP);
5950 
5951 	if (!spec->tos)
5952 		*unused_tuple |= BIT(INNER_IP_TOS);
5953 
5954 	if (!spec->proto)
5955 		*unused_tuple |= BIT(INNER_IP_PROTO);
5956 
5957 	if (spec->l4_4_bytes)
5958 		return -EOPNOTSUPP;
5959 
5960 	if (spec->ip_ver != ETH_RX_NFC_IP4)
5961 		return -EOPNOTSUPP;
5962 
5963 	return 0;
5964 }
5965 
5966 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5967 				       u32 *unused_tuple)
5968 {
5969 	if (!spec || !unused_tuple)
5970 		return -EINVAL;
5971 
5972 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5973 
5974 	/* check whether src/dst ip address is used */
5975 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5976 		*unused_tuple |= BIT(INNER_SRC_IP);
5977 
5978 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5979 		*unused_tuple |= BIT(INNER_DST_IP);
5980 
5981 	if (!spec->psrc)
5982 		*unused_tuple |= BIT(INNER_SRC_PORT);
5983 
5984 	if (!spec->pdst)
5985 		*unused_tuple |= BIT(INNER_DST_PORT);
5986 
5987 	if (!spec->tclass)
5988 		*unused_tuple |= BIT(INNER_IP_TOS);
5989 
5990 	return 0;
5991 }
5992 
5993 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5994 				    u32 *unused_tuple)
5995 {
5996 	if (!spec || !unused_tuple)
5997 		return -EINVAL;
5998 
5999 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6000 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6001 
6002 	/* check whether src/dst ip address is used */
6003 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6004 		*unused_tuple |= BIT(INNER_SRC_IP);
6005 
6006 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6007 		*unused_tuple |= BIT(INNER_DST_IP);
6008 
6009 	if (!spec->l4_proto)
6010 		*unused_tuple |= BIT(INNER_IP_PROTO);
6011 
6012 	if (!spec->tclass)
6013 		*unused_tuple |= BIT(INNER_IP_TOS);
6014 
6015 	if (spec->l4_4_bytes)
6016 		return -EOPNOTSUPP;
6017 
6018 	return 0;
6019 }
6020 
6021 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6022 {
6023 	if (!spec || !unused_tuple)
6024 		return -EINVAL;
6025 
6026 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6027 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6028 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6029 
6030 	if (is_zero_ether_addr(spec->h_source))
6031 		*unused_tuple |= BIT(INNER_SRC_MAC);
6032 
6033 	if (is_zero_ether_addr(spec->h_dest))
6034 		*unused_tuple |= BIT(INNER_DST_MAC);
6035 
6036 	if (!spec->h_proto)
6037 		*unused_tuple |= BIT(INNER_ETH_TYPE);
6038 
6039 	return 0;
6040 }
6041 
6042 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6043 				    struct ethtool_rx_flow_spec *fs,
6044 				    u32 *unused_tuple)
6045 {
6046 	if (fs->flow_type & FLOW_EXT) {
6047 		if (fs->h_ext.vlan_etype) {
6048 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6049 			return -EOPNOTSUPP;
6050 		}
6051 
6052 		if (!fs->h_ext.vlan_tci)
6053 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6054 
6055 		if (fs->m_ext.vlan_tci &&
6056 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6057 			dev_err(&hdev->pdev->dev,
6058 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6059 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6060 			return -EINVAL;
6061 		}
6062 	} else {
6063 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6064 	}
6065 
6066 	if (fs->flow_type & FLOW_MAC_EXT) {
6067 		if (hdev->fd_cfg.fd_mode !=
6068 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6069 			dev_err(&hdev->pdev->dev,
6070 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
6071 			return -EOPNOTSUPP;
6072 		}
6073 
6074 		if (is_zero_ether_addr(fs->h_ext.h_dest))
6075 			*unused_tuple |= BIT(INNER_DST_MAC);
6076 		else
6077 			*unused_tuple &= ~BIT(INNER_DST_MAC);
6078 	}
6079 
6080 	return 0;
6081 }
6082 
6083 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6084 				       struct hclge_fd_user_def_info *info)
6085 {
6086 	switch (flow_type) {
6087 	case ETHER_FLOW:
6088 		info->layer = HCLGE_FD_USER_DEF_L2;
6089 		*unused_tuple &= ~BIT(INNER_L2_RSV);
6090 		break;
6091 	case IP_USER_FLOW:
6092 	case IPV6_USER_FLOW:
6093 		info->layer = HCLGE_FD_USER_DEF_L3;
6094 		*unused_tuple &= ~BIT(INNER_L3_RSV);
6095 		break;
6096 	case TCP_V4_FLOW:
6097 	case UDP_V4_FLOW:
6098 	case TCP_V6_FLOW:
6099 	case UDP_V6_FLOW:
6100 		info->layer = HCLGE_FD_USER_DEF_L4;
6101 		*unused_tuple &= ~BIT(INNER_L4_RSV);
6102 		break;
6103 	default:
6104 		return -EOPNOTSUPP;
6105 	}
6106 
6107 	return 0;
6108 }
6109 
6110 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6111 {
6112 	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6113 }
6114 
6115 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6116 					 struct ethtool_rx_flow_spec *fs,
6117 					 u32 *unused_tuple,
6118 					 struct hclge_fd_user_def_info *info)
6119 {
6120 	u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6121 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6122 	u16 data, offset, data_mask, offset_mask;
6123 	int ret;
6124 
6125 	info->layer = HCLGE_FD_USER_DEF_NONE;
6126 	*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6127 
6128 	if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6129 		return 0;
6130 
6131 	/* user-def data from ethtool is a 64-bit value; bits 0~15 are used
6132 	 * for data, and bits 32~47 are used for offset.
6133 	 */
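	/* e.g. assuming ethtool packs the 64-bit user-def value with the high
	 * word in data[0] and the low word in data[1], "user-def 0x40000abcd"
	 * requests matching data 0xabcd at offset 4
	 */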
6134 	data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6135 	data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6136 	offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6137 	offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6138 
6139 	if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6140 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6141 		return -EOPNOTSUPP;
6142 	}
6143 
6144 	if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6145 		dev_err(&hdev->pdev->dev,
6146 			"user-def offset[%u] should be no more than %u\n",
6147 			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6148 		return -EINVAL;
6149 	}
6150 
6151 	if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6152 		dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6153 		return -EINVAL;
6154 	}
6155 
6156 	ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6157 	if (ret) {
6158 		dev_err(&hdev->pdev->dev,
6159 			"unsupported flow type for user-def bytes, ret = %d\n",
6160 			ret);
6161 		return ret;
6162 	}
6163 
6164 	info->data = data;
6165 	info->data_mask = data_mask;
6166 	info->offset = offset;
6167 
6168 	return 0;
6169 }
6170 
6171 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6172 			       struct ethtool_rx_flow_spec *fs,
6173 			       u32 *unused_tuple,
6174 			       struct hclge_fd_user_def_info *info)
6175 {
6176 	u32 flow_type;
6177 	int ret;
6178 
6179 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6180 		dev_err(&hdev->pdev->dev,
6181 			"failed to config fd rules, invalid rule location: %u, max is %u\n",
6182 			fs->location,
6183 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6184 		return -EINVAL;
6185 	}
6186 
6187 	ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6188 	if (ret)
6189 		return ret;
6190 
6191 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6192 	switch (flow_type) {
6193 	case SCTP_V4_FLOW:
6194 	case TCP_V4_FLOW:
6195 	case UDP_V4_FLOW:
6196 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6197 						  unused_tuple);
6198 		break;
6199 	case IP_USER_FLOW:
6200 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6201 					       unused_tuple);
6202 		break;
6203 	case SCTP_V6_FLOW:
6204 	case TCP_V6_FLOW:
6205 	case UDP_V6_FLOW:
6206 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6207 						  unused_tuple);
6208 		break;
6209 	case IPV6_USER_FLOW:
6210 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6211 					       unused_tuple);
6212 		break;
6213 	case ETHER_FLOW:
6214 		if (hdev->fd_cfg.fd_mode !=
6215 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6216 			dev_err(&hdev->pdev->dev,
6217 				"ETHER_FLOW is not supported in current fd mode!\n");
6218 			return -EOPNOTSUPP;
6219 		}
6220 
6221 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6222 						 unused_tuple);
6223 		break;
6224 	default:
6225 		dev_err(&hdev->pdev->dev,
6226 			"unsupported protocol type, protocol type = %#x\n",
6227 			flow_type);
6228 		return -EOPNOTSUPP;
6229 	}
6230 
6231 	if (ret) {
6232 		dev_err(&hdev->pdev->dev,
6233 			"failed to check flow union tuple, ret = %d\n",
6234 			ret);
6235 		return ret;
6236 	}
6237 
6238 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6239 }
6240 
6241 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6242 				      struct ethtool_rx_flow_spec *fs,
6243 				      struct hclge_fd_rule *rule, u8 ip_proto)
6244 {
6245 	rule->tuples.src_ip[IPV4_INDEX] =
6246 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6247 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6248 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6249 
6250 	rule->tuples.dst_ip[IPV4_INDEX] =
6251 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6252 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6253 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6254 
6255 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6256 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6257 
6258 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6259 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6260 
6261 	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6262 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6263 
6264 	rule->tuples.ether_proto = ETH_P_IP;
6265 	rule->tuples_mask.ether_proto = 0xFFFF;
6266 
6267 	rule->tuples.ip_proto = ip_proto;
6268 	rule->tuples_mask.ip_proto = 0xFF;
6269 }
6270 
6271 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6272 				   struct ethtool_rx_flow_spec *fs,
6273 				   struct hclge_fd_rule *rule)
6274 {
6275 	rule->tuples.src_ip[IPV4_INDEX] =
6276 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6277 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6278 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6279 
6280 	rule->tuples.dst_ip[IPV4_INDEX] =
6281 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6282 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6283 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6284 
6285 	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6286 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6287 
6288 	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6289 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6290 
6291 	rule->tuples.ether_proto = ETH_P_IP;
6292 	rule->tuples_mask.ether_proto = 0xFFFF;
6293 }
6294 
6295 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6296 				      struct ethtool_rx_flow_spec *fs,
6297 				      struct hclge_fd_rule *rule, u8 ip_proto)
6298 {
6299 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6300 			  IPV6_SIZE);
6301 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6302 			  IPV6_SIZE);
6303 
6304 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6305 			  IPV6_SIZE);
6306 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6307 			  IPV6_SIZE);
6308 
6309 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6310 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6311 
6312 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6313 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6314 
6315 	rule->tuples.ether_proto = ETH_P_IPV6;
6316 	rule->tuples_mask.ether_proto = 0xFFFF;
6317 
6318 	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6319 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6320 
6321 	rule->tuples.ip_proto = ip_proto;
6322 	rule->tuples_mask.ip_proto = 0xFF;
6323 }
6324 
6325 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6326 				   struct ethtool_rx_flow_spec *fs,
6327 				   struct hclge_fd_rule *rule)
6328 {
6329 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6330 			  IPV6_SIZE);
6331 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6332 			  IPV6_SIZE);
6333 
6334 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6335 			  IPV6_SIZE);
6336 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6337 			  IPV6_SIZE);
6338 
6339 	rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6340 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6341 
6342 	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6343 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6344 
6345 	rule->tuples.ether_proto = ETH_P_IPV6;
6346 	rule->tuples_mask.ether_proto = 0xFFFF;
6347 }
6348 
6349 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6350 				     struct ethtool_rx_flow_spec *fs,
6351 				     struct hclge_fd_rule *rule)
6352 {
6353 	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6354 	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6355 
6356 	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6357 	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6358 
6359 	rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6360 	rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6361 }
6362 
6363 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6364 					struct hclge_fd_rule *rule)
6365 {
6366 	switch (info->layer) {
6367 	case HCLGE_FD_USER_DEF_L2:
6368 		rule->tuples.l2_user_def = info->data;
6369 		rule->tuples_mask.l2_user_def = info->data_mask;
6370 		break;
6371 	case HCLGE_FD_USER_DEF_L3:
6372 		rule->tuples.l3_user_def = info->data;
6373 		rule->tuples_mask.l3_user_def = info->data_mask;
6374 		break;
6375 	case HCLGE_FD_USER_DEF_L4:
6376 		rule->tuples.l4_user_def = (u32)info->data << 16;
6377 		rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6378 		break;
6379 	default:
6380 		break;
6381 	}
6382 
6383 	rule->ep.user_def = *info;
6384 }
6385 
6386 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6387 			      struct ethtool_rx_flow_spec *fs,
6388 			      struct hclge_fd_rule *rule,
6389 			      struct hclge_fd_user_def_info *info)
6390 {
6391 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6392 
6393 	switch (flow_type) {
6394 	case SCTP_V4_FLOW:
6395 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6396 		break;
6397 	case TCP_V4_FLOW:
6398 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6399 		break;
6400 	case UDP_V4_FLOW:
6401 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6402 		break;
6403 	case IP_USER_FLOW:
6404 		hclge_fd_get_ip4_tuple(hdev, fs, rule);
6405 		break;
6406 	case SCTP_V6_FLOW:
6407 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6408 		break;
6409 	case TCP_V6_FLOW:
6410 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6411 		break;
6412 	case UDP_V6_FLOW:
6413 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6414 		break;
6415 	case IPV6_USER_FLOW:
6416 		hclge_fd_get_ip6_tuple(hdev, fs, rule);
6417 		break;
6418 	case ETHER_FLOW:
6419 		hclge_fd_get_ether_tuple(hdev, fs, rule);
6420 		break;
6421 	default:
6422 		return -EOPNOTSUPP;
6423 	}
6424 
6425 	if (fs->flow_type & FLOW_EXT) {
6426 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6427 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6428 		hclge_fd_get_user_def_tuple(info, rule);
6429 	}
6430 
6431 	if (fs->flow_type & FLOW_MAC_EXT) {
6432 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6433 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6434 	}
6435 
6436 	return 0;
6437 }
6438 
6439 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6440 				struct hclge_fd_rule *rule)
6441 {
6442 	int ret;
6443 
6444 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6445 	if (ret)
6446 		return ret;
6447 
6448 	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6449 }
6450 
6451 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6452 				     struct hclge_fd_rule *rule)
6453 {
6454 	int ret;
6455 
6456 	spin_lock_bh(&hdev->fd_rule_lock);
6457 
6458 	if (hdev->fd_active_type != rule->rule_type &&
6459 	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6460 	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6461 		dev_err(&hdev->pdev->dev,
6462 			"mode conflict(new type %d, active type %d), please delete existing rules first\n",
6463 			rule->rule_type, hdev->fd_active_type);
6464 		spin_unlock_bh(&hdev->fd_rule_lock);
6465 		return -EINVAL;
6466 	}
6467 
6468 	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6469 	if (ret)
6470 		goto out;
6471 
6472 	ret = hclge_clear_arfs_rules(hdev);
6473 	if (ret)
6474 		goto out;
6475 
6476 	ret = hclge_fd_config_rule(hdev, rule);
6477 	if (ret)
6478 		goto out;
6479 
6480 	rule->state = HCLGE_FD_ACTIVE;
6481 	hdev->fd_active_type = rule->rule_type;
6482 	hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6483 
6484 out:
6485 	spin_unlock_bh(&hdev->fd_rule_lock);
6486 	return ret;
6487 }
6488 
6489 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6490 {
6491 	struct hclge_vport *vport = hclge_get_vport(handle);
6492 	struct hclge_dev *hdev = vport->back;
6493 
6494 	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6495 }
6496 
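/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC means drop the packet;
 * otherwise the cookie encodes a VF index and a queue index, which are
 * validated against the number of requested VFs and the target vport's
 * TQP count.
 */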
6497 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6498 				      u16 *vport_id, u8 *action, u16 *queue_id)
6499 {
6500 	struct hclge_vport *vport = hdev->vport;
6501 
6502 	if (ring_cookie == RX_CLS_FLOW_DISC) {
6503 		*action = HCLGE_FD_ACTION_DROP_PACKET;
6504 	} else {
6505 		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6506 		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6507 		u16 tqps;
6508 
6509 		if (vf > hdev->num_req_vfs) {
6510 			dev_err(&hdev->pdev->dev,
6511 				"Error: vf id (%u) > max vf num (%u)\n",
6512 				vf, hdev->num_req_vfs);
6513 			return -EINVAL;
6514 		}
6515 
6516 		*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6517 		tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6518 
6519 		if (ring >= tqps) {
6520 			dev_err(&hdev->pdev->dev,
6521 				"Error: queue id (%u) > max tqp num (%u)\n",
6522 				ring, tqps - 1);
6523 			return -EINVAL;
6524 		}
6525 
6526 		*action = HCLGE_FD_ACTION_SELECT_QUEUE;
6527 		*queue_id = ring;
6528 	}
6529 
6530 	return 0;
6531 }
6532 
6533 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6534 			      struct ethtool_rxnfc *cmd)
6535 {
6536 	struct hclge_vport *vport = hclge_get_vport(handle);
6537 	struct hclge_dev *hdev = vport->back;
6538 	struct hclge_fd_user_def_info info;
6539 	u16 dst_vport_id = 0, q_index = 0;
6540 	struct ethtool_rx_flow_spec *fs;
6541 	struct hclge_fd_rule *rule;
6542 	u32 unused = 0;
6543 	u8 action;
6544 	int ret;
6545 
6546 	if (!hnae3_dev_fd_supported(hdev)) {
6547 		dev_err(&hdev->pdev->dev,
6548 			"flow table director is not supported\n");
6549 		return -EOPNOTSUPP;
6550 	}
6551 
6552 	if (!hdev->fd_en) {
6553 		dev_err(&hdev->pdev->dev,
6554 			"please enable flow director first\n");
6555 		return -EOPNOTSUPP;
6556 	}
6557 
6558 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6559 
6560 	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6561 	if (ret)
6562 		return ret;
6563 
6564 	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6565 					 &action, &q_index);
6566 	if (ret)
6567 		return ret;
6568 
6569 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6570 	if (!rule)
6571 		return -ENOMEM;
6572 
6573 	ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6574 	if (ret) {
6575 		kfree(rule);
6576 		return ret;
6577 	}
6578 
6579 	rule->flow_type = fs->flow_type;
6580 	rule->location = fs->location;
6581 	rule->unused_tuple = unused;
6582 	rule->vf_id = dst_vport_id;
6583 	rule->queue_id = q_index;
6584 	rule->action = action;
6585 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
6586 
6587 	ret = hclge_add_fd_entry_common(hdev, rule);
6588 	if (ret)
6589 		kfree(rule);
6590 
6591 	return ret;
6592 }
6593 
6594 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6595 			      struct ethtool_rxnfc *cmd)
6596 {
6597 	struct hclge_vport *vport = hclge_get_vport(handle);
6598 	struct hclge_dev *hdev = vport->back;
6599 	struct ethtool_rx_flow_spec *fs;
6600 	int ret;
6601 
6602 	if (!hnae3_dev_fd_supported(hdev))
6603 		return -EOPNOTSUPP;
6604 
6605 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6606 
6607 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6608 		return -EINVAL;
6609 
6610 	spin_lock_bh(&hdev->fd_rule_lock);
6611 	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6612 	    !test_bit(fs->location, hdev->fd_bmap)) {
6613 		dev_err(&hdev->pdev->dev,
6614 			"Delete fail, rule %u is inexistent\n", fs->location);
6615 		spin_unlock_bh(&hdev->fd_rule_lock);
6616 		return -ENOENT;
6617 	}
6618 
6619 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6620 				   NULL, false);
6621 	if (ret)
6622 		goto out;
6623 
6624 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6625 
6626 out:
6627 	spin_unlock_bh(&hdev->fd_rule_lock);
6628 	return ret;
6629 }
6630 
6631 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6632 					 bool clear_list)
6633 {
6634 	struct hclge_fd_rule *rule;
6635 	struct hlist_node *node;
6636 	u16 location;
6637 
6638 	if (!hnae3_dev_fd_supported(hdev))
6639 		return;
6640 
6641 	spin_lock_bh(&hdev->fd_rule_lock);
6642 
6643 	for_each_set_bit(location, hdev->fd_bmap,
6644 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6645 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6646 				     NULL, false);
6647 
6648 	if (clear_list) {
6649 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6650 					  rule_node) {
6651 			hlist_del(&rule->rule_node);
6652 			kfree(rule);
6653 		}
6654 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6655 		hdev->hclge_fd_rule_num = 0;
6656 		bitmap_zero(hdev->fd_bmap,
6657 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6658 	}
6659 
6660 	spin_unlock_bh(&hdev->fd_rule_lock);
6661 }
6662 
6663 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6664 {
6665 	hclge_clear_fd_rules_in_list(hdev, true);
6666 	hclge_fd_disable_user_def(hdev);
6667 }
6668 
6669 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6670 {
6671 	struct hclge_vport *vport = hclge_get_vport(handle);
6672 	struct hclge_dev *hdev = vport->back;
6673 	struct hclge_fd_rule *rule;
6674 	struct hlist_node *node;
6675 
6676 	/* Return 0 here, because the reset error handling checks this
6677 	 * return value. If an error is returned here, the reset process
6678 	 * will fail.
6679 	 */
6680 	if (!hnae3_dev_fd_supported(hdev))
6681 		return 0;
6682 
6683 	/* if fd is disabled, the rules should not be restored during reset */
6684 	if (!hdev->fd_en)
6685 		return 0;
6686 
6687 	spin_lock_bh(&hdev->fd_rule_lock);
6688 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6689 		if (rule->state == HCLGE_FD_ACTIVE)
6690 			rule->state = HCLGE_FD_TO_ADD;
6691 	}
6692 	spin_unlock_bh(&hdev->fd_rule_lock);
6693 	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6694 
6695 	return 0;
6696 }
6697 
6698 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6699 				 struct ethtool_rxnfc *cmd)
6700 {
6701 	struct hclge_vport *vport = hclge_get_vport(handle);
6702 	struct hclge_dev *hdev = vport->back;
6703 
6704 	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6705 		return -EOPNOTSUPP;
6706 
6707 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6708 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6709 
6710 	return 0;
6711 }
6712 
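/* The helpers below translate a stored hclge_fd_rule back into the
 * ethtool_rx_flow_spec layout used by hclge_get_fd_rule_info; mask
 * fields for tuples marked unused are reported as zero.
 */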
6713 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6714 				     struct ethtool_tcpip4_spec *spec,
6715 				     struct ethtool_tcpip4_spec *spec_mask)
6716 {
6717 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6718 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6719 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6720 
6721 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6722 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6723 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6724 
6725 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6726 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6727 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6728 
6729 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6730 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6731 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6732 
6733 	spec->tos = rule->tuples.ip_tos;
6734 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6735 			0 : rule->tuples_mask.ip_tos;
6736 }
6737 
6738 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6739 				  struct ethtool_usrip4_spec *spec,
6740 				  struct ethtool_usrip4_spec *spec_mask)
6741 {
6742 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6743 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6744 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6745 
6746 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6747 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6748 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6749 
6750 	spec->tos = rule->tuples.ip_tos;
6751 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6752 			0 : rule->tuples_mask.ip_tos;
6753 
6754 	spec->proto = rule->tuples.ip_proto;
6755 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6756 			0 : rule->tuples_mask.ip_proto;
6757 
6758 	spec->ip_ver = ETH_RX_NFC_IP4;
6759 }
6760 
6761 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6762 				     struct ethtool_tcpip6_spec *spec,
6763 				     struct ethtool_tcpip6_spec *spec_mask)
6764 {
6765 	cpu_to_be32_array(spec->ip6src,
6766 			  rule->tuples.src_ip, IPV6_SIZE);
6767 	cpu_to_be32_array(spec->ip6dst,
6768 			  rule->tuples.dst_ip, IPV6_SIZE);
6769 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6770 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6771 	else
6772 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6773 				  IPV6_SIZE);
6774 
6775 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6776 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6777 	else
6778 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6779 				  IPV6_SIZE);
6780 
6781 	spec->tclass = rule->tuples.ip_tos;
6782 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6783 			0 : rule->tuples_mask.ip_tos;
6784 
6785 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6786 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6787 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6788 
6789 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6790 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6791 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6792 }
6793 
6794 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6795 				  struct ethtool_usrip6_spec *spec,
6796 				  struct ethtool_usrip6_spec *spec_mask)
6797 {
6798 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6799 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6800 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6801 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6802 	else
6803 		cpu_to_be32_array(spec_mask->ip6src,
6804 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6805 
6806 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6807 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6808 	else
6809 		cpu_to_be32_array(spec_mask->ip6dst,
6810 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6811 
6812 	spec->tclass = rule->tuples.ip_tos;
6813 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6814 			0 : rule->tuples_mask.ip_tos;
6815 
6816 	spec->l4_proto = rule->tuples.ip_proto;
6817 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6818 			0 : rule->tuples_mask.ip_proto;
6819 }
6820 
6821 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6822 				    struct ethhdr *spec,
6823 				    struct ethhdr *spec_mask)
6824 {
6825 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6826 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6827 
6828 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6829 		eth_zero_addr(spec_mask->h_source);
6830 	else
6831 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6832 
6833 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6834 		eth_zero_addr(spec_mask->h_dest);
6835 	else
6836 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6837 
6838 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6839 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6840 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6841 }
6842 
6843 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6844 				       struct hclge_fd_rule *rule)
6845 {
6846 	if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6847 	    HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6848 		fs->h_ext.data[0] = 0;
6849 		fs->h_ext.data[1] = 0;
6850 		fs->m_ext.data[0] = 0;
6851 		fs->m_ext.data[1] = 0;
6852 	} else {
6853 		fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6854 		fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6855 		fs->m_ext.data[0] =
6856 				cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6857 		fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6858 	}
6859 }
6860 
6861 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6862 				  struct hclge_fd_rule *rule)
6863 {
6864 	if (fs->flow_type & FLOW_EXT) {
6865 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6866 		fs->m_ext.vlan_tci =
6867 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6868 				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6869 
6870 		hclge_fd_get_user_def_info(fs, rule);
6871 	}
6872 
6873 	if (fs->flow_type & FLOW_MAC_EXT) {
6874 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6875 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6876 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6877 		else
6878 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6879 					rule->tuples_mask.dst_mac);
6880 	}
6881 }
6882 
6883 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6884 				  struct ethtool_rxnfc *cmd)
6885 {
6886 	struct hclge_vport *vport = hclge_get_vport(handle);
6887 	struct hclge_fd_rule *rule = NULL;
6888 	struct hclge_dev *hdev = vport->back;
6889 	struct ethtool_rx_flow_spec *fs;
6890 	struct hlist_node *node2;
6891 
6892 	if (!hnae3_dev_fd_supported(hdev))
6893 		return -EOPNOTSUPP;
6894 
6895 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6896 
6897 	spin_lock_bh(&hdev->fd_rule_lock);
6898 
6899 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6900 		if (rule->location >= fs->location)
6901 			break;
6902 	}
6903 
6904 	if (!rule || fs->location != rule->location) {
6905 		spin_unlock_bh(&hdev->fd_rule_lock);
6906 
6907 		return -ENOENT;
6908 	}
6909 
6910 	fs->flow_type = rule->flow_type;
6911 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6912 	case SCTP_V4_FLOW:
6913 	case TCP_V4_FLOW:
6914 	case UDP_V4_FLOW:
6915 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6916 					 &fs->m_u.tcp_ip4_spec);
6917 		break;
6918 	case IP_USER_FLOW:
6919 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6920 				      &fs->m_u.usr_ip4_spec);
6921 		break;
6922 	case SCTP_V6_FLOW:
6923 	case TCP_V6_FLOW:
6924 	case UDP_V6_FLOW:
6925 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6926 					 &fs->m_u.tcp_ip6_spec);
6927 		break;
6928 	case IPV6_USER_FLOW:
6929 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6930 				      &fs->m_u.usr_ip6_spec);
6931 		break;
6932 	/* The flow type of the fd rule has been checked before adding it to
6933 	 * the rule list. As all other flow types have been handled above, it
6934 	 * must be ETHER_FLOW for the default case.
6935 	 */
6936 	default:
6937 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6938 					&fs->m_u.ether_spec);
6939 		break;
6940 	}
6941 
6942 	hclge_fd_get_ext_info(fs, rule);
6943 
6944 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6945 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6946 	} else {
6947 		u64 vf_id;
6948 
6949 		fs->ring_cookie = rule->queue_id;
6950 		vf_id = rule->vf_id;
6951 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6952 		fs->ring_cookie |= vf_id;
6953 	}
6954 
6955 	spin_unlock_bh(&hdev->fd_rule_lock);
6956 
6957 	return 0;
6958 }
6959 
6960 static int hclge_get_all_rules(struct hnae3_handle *handle,
6961 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6962 {
6963 	struct hclge_vport *vport = hclge_get_vport(handle);
6964 	struct hclge_dev *hdev = vport->back;
6965 	struct hclge_fd_rule *rule;
6966 	struct hlist_node *node2;
6967 	int cnt = 0;
6968 
6969 	if (!hnae3_dev_fd_supported(hdev))
6970 		return -EOPNOTSUPP;
6971 
6972 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6973 
6974 	spin_lock_bh(&hdev->fd_rule_lock);
6975 	hlist_for_each_entry_safe(rule, node2,
6976 				  &hdev->fd_rule_list, rule_node) {
6977 		if (cnt == cmd->rule_cnt) {
6978 			spin_unlock_bh(&hdev->fd_rule_lock);
6979 			return -EMSGSIZE;
6980 		}
6981 
6982 		if (rule->state == HCLGE_FD_TO_DEL)
6983 			continue;
6984 
6985 		rule_locs[cnt] = rule->location;
6986 		cnt++;
6987 	}
6988 
6989 	spin_unlock_bh(&hdev->fd_rule_lock);
6990 
6991 	cmd->rule_cnt = cnt;
6992 
6993 	return 0;
6994 }
6995 
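/* Convert dissected flow keys from the aRFS path into hclge rule tuples.
 * IPv4 addresses occupy only the last word of the IP arrays; IPv6
 * addresses fill all IPV6_SIZE words.
 */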
6996 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6997 				     struct hclge_fd_rule_tuples *tuples)
6998 {
6999 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7000 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7001 
7002 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7003 	tuples->ip_proto = fkeys->basic.ip_proto;
7004 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7005 
7006 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7007 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7008 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7009 	} else {
7010 		int i;
7011 
7012 		for (i = 0; i < IPV6_SIZE; i++) {
7013 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7014 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7015 		}
7016 	}
7017 }
7018 
7019 /* traverse all rules, check whether an existing rule has the same tuples */
7020 static struct hclge_fd_rule *
7021 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7022 			  const struct hclge_fd_rule_tuples *tuples)
7023 {
7024 	struct hclge_fd_rule *rule = NULL;
7025 	struct hlist_node *node;
7026 
7027 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7028 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7029 			return rule;
7030 	}
7031 
7032 	return NULL;
7033 }
7034 
7035 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7036 				     struct hclge_fd_rule *rule)
7037 {
7038 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7039 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7040 			     BIT(INNER_SRC_PORT);
7041 	rule->action = 0;
7042 	rule->vf_id = 0;
7043 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7044 	rule->state = HCLGE_FD_TO_ADD;
7045 	if (tuples->ether_proto == ETH_P_IP) {
7046 		if (tuples->ip_proto == IPPROTO_TCP)
7047 			rule->flow_type = TCP_V4_FLOW;
7048 		else
7049 			rule->flow_type = UDP_V4_FLOW;
7050 	} else {
7051 		if (tuples->ip_proto == IPPROTO_TCP)
7052 			rule->flow_type = TCP_V6_FLOW;
7053 		else
7054 			rule->flow_type = UDP_V6_FLOW;
7055 	}
7056 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7057 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7058 }
7059 
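/* aRFS entry point: only usable while no ethtool or tc flower rules are
 * active. Reuse an existing rule with matching tuples when possible,
 * otherwise allocate a new one (GFP_ATOMIC, since fd_rule_lock is held
 * in BH context) in the first free location of the rule bitmap.
 */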
7060 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7061 				      u16 flow_id, struct flow_keys *fkeys)
7062 {
7063 	struct hclge_vport *vport = hclge_get_vport(handle);
7064 	struct hclge_fd_rule_tuples new_tuples = {};
7065 	struct hclge_dev *hdev = vport->back;
7066 	struct hclge_fd_rule *rule;
7067 	u16 bit_id;
7068 
7069 	if (!hnae3_dev_fd_supported(hdev))
7070 		return -EOPNOTSUPP;
7071 
7072 	/* when an fd rule added by the user already exists,
7073 	 * arfs should not work
7074 	 */
7075 	spin_lock_bh(&hdev->fd_rule_lock);
7076 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7077 	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7078 		spin_unlock_bh(&hdev->fd_rule_lock);
7079 		return -EOPNOTSUPP;
7080 	}
7081 
7082 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7083 
7084 	/* check whether a flow director filter already exists for this flow:
7085 	 * if not, create a new filter for it;
7086 	 * if a filter exists with a different queue id, modify the filter;
7087 	 * if a filter exists with the same queue id, do nothing
7088 	 */
7089 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7090 	if (!rule) {
7091 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7092 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7093 			spin_unlock_bh(&hdev->fd_rule_lock);
7094 			return -ENOSPC;
7095 		}
7096 
7097 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7098 		if (!rule) {
7099 			spin_unlock_bh(&hdev->fd_rule_lock);
7100 			return -ENOMEM;
7101 		}
7102 
7103 		rule->location = bit_id;
7104 		rule->arfs.flow_id = flow_id;
7105 		rule->queue_id = queue_id;
7106 		hclge_fd_build_arfs_rule(&new_tuples, rule);
7107 		hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7108 		hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7109 	} else if (rule->queue_id != queue_id) {
7110 		rule->queue_id = queue_id;
7111 		rule->state = HCLGE_FD_TO_ADD;
7112 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7113 		hclge_task_schedule(hdev, 0);
7114 	}
7115 	spin_unlock_bh(&hdev->fd_rule_lock);
7116 	return rule->location;
7117 }
7118 
7119 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7120 {
7121 #ifdef CONFIG_RFS_ACCEL
7122 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7123 	struct hclge_fd_rule *rule;
7124 	struct hlist_node *node;
7125 
7126 	spin_lock_bh(&hdev->fd_rule_lock);
7127 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7128 		spin_unlock_bh(&hdev->fd_rule_lock);
7129 		return;
7130 	}
7131 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7132 		if (rule->state != HCLGE_FD_ACTIVE)
7133 			continue;
7134 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7135 					rule->arfs.flow_id, rule->location)) {
7136 			rule->state = HCLGE_FD_TO_DEL;
7137 			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7138 		}
7139 	}
7140 	spin_unlock_bh(&hdev->fd_rule_lock);
7141 #endif
7142 }
7143 
7144 /* must be called with fd_rule_lock held */
7145 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7146 {
7147 #ifdef CONFIG_RFS_ACCEL
7148 	struct hclge_fd_rule *rule;
7149 	struct hlist_node *node;
7150 	int ret;
7151 
7152 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7153 		return 0;
7154 
7155 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7156 		switch (rule->state) {
7157 		case HCLGE_FD_TO_DEL:
7158 		case HCLGE_FD_ACTIVE:
7159 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7160 						   rule->location, NULL, false);
7161 			if (ret)
7162 				return ret;
7163 			fallthrough;
7164 		case HCLGE_FD_TO_ADD:
7165 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7166 			hlist_del(&rule->rule_node);
7167 			kfree(rule);
7168 			break;
7169 		default:
7170 			break;
7171 		}
7172 	}
7173 	hclge_sync_fd_state(hdev);
7174 
7175 #endif
7176 	return 0;
7177 }
7178 
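/* The hclge_get_cls_key_* helpers below each copy one tc flower
 * dissector match into the rule tuples; when a key is absent, the
 * corresponding bits are added to unused_tuple instead.
 */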
7179 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7180 				    struct hclge_fd_rule *rule)
7181 {
7182 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7183 		struct flow_match_basic match;
7184 		u16 ethtype_key, ethtype_mask;
7185 
7186 		flow_rule_match_basic(flow, &match);
7187 		ethtype_key = ntohs(match.key->n_proto);
7188 		ethtype_mask = ntohs(match.mask->n_proto);
7189 
7190 		if (ethtype_key == ETH_P_ALL) {
7191 			ethtype_key = 0;
7192 			ethtype_mask = 0;
7193 		}
7194 		rule->tuples.ether_proto = ethtype_key;
7195 		rule->tuples_mask.ether_proto = ethtype_mask;
7196 		rule->tuples.ip_proto = match.key->ip_proto;
7197 		rule->tuples_mask.ip_proto = match.mask->ip_proto;
7198 	} else {
7199 		rule->unused_tuple |= BIT(INNER_IP_PROTO);
7200 		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7201 	}
7202 }
7203 
7204 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7205 				  struct hclge_fd_rule *rule)
7206 {
7207 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7208 		struct flow_match_eth_addrs match;
7209 
7210 		flow_rule_match_eth_addrs(flow, &match);
7211 		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7212 		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7213 		ether_addr_copy(rule->tuples.src_mac, match.key->src);
7214 		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7215 	} else {
7216 		rule->unused_tuple |= BIT(INNER_DST_MAC);
7217 		rule->unused_tuple |= BIT(INNER_SRC_MAC);
7218 	}
7219 }
7220 
7221 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7222 				   struct hclge_fd_rule *rule)
7223 {
7224 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7225 		struct flow_match_vlan match;
7226 
7227 		flow_rule_match_vlan(flow, &match);
7228 		rule->tuples.vlan_tag1 = match.key->vlan_id |
7229 				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
7230 		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7231 				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7232 	} else {
7233 		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7234 	}
7235 }
7236 
7237 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7238 				 struct hclge_fd_rule *rule)
7239 {
7240 	u16 addr_type = 0;
7241 
7242 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7243 		struct flow_match_control match;
7244 
7245 		flow_rule_match_control(flow, &match);
7246 		addr_type = match.key->addr_type;
7247 	}
7248 
7249 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7250 		struct flow_match_ipv4_addrs match;
7251 
7252 		flow_rule_match_ipv4_addrs(flow, &match);
7253 		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7254 		rule->tuples_mask.src_ip[IPV4_INDEX] =
7255 						be32_to_cpu(match.mask->src);
7256 		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7257 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
7258 						be32_to_cpu(match.mask->dst);
7259 	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7260 		struct flow_match_ipv6_addrs match;
7261 
7262 		flow_rule_match_ipv6_addrs(flow, &match);
7263 		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7264 				  IPV6_SIZE);
7265 		be32_to_cpu_array(rule->tuples_mask.src_ip,
7266 				  match.mask->src.s6_addr32, IPV6_SIZE);
7267 		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7268 				  IPV6_SIZE);
7269 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
7270 				  match.mask->dst.s6_addr32, IPV6_SIZE);
7271 	} else {
7272 		rule->unused_tuple |= BIT(INNER_SRC_IP);
7273 		rule->unused_tuple |= BIT(INNER_DST_IP);
7274 	}
7275 }
7276 
7277 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7278 				   struct hclge_fd_rule *rule)
7279 {
7280 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7281 		struct flow_match_ports match;
7282 
7283 		flow_rule_match_ports(flow, &match);
7284 
7285 		rule->tuples.src_port = be16_to_cpu(match.key->src);
7286 		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7287 		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7288 		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7289 	} else {
7290 		rule->unused_tuple |= BIT(INNER_SRC_PORT);
7291 		rule->unused_tuple |= BIT(INNER_DST_PORT);
7292 	}
7293 }
7294 
7295 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7296 				  struct flow_cls_offload *cls_flower,
7297 				  struct hclge_fd_rule *rule)
7298 {
7299 	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7300 	struct flow_dissector *dissector = flow->match.dissector;
7301 
7302 	if (dissector->used_keys &
7303 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7304 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
7305 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7306 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
7307 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7308 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7309 	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7310 		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7311 			dissector->used_keys);
7312 		return -EOPNOTSUPP;
7313 	}
7314 
7315 	hclge_get_cls_key_basic(flow, rule);
7316 	hclge_get_cls_key_mac(flow, rule);
7317 	hclge_get_cls_key_vlan(flow, rule);
7318 	hclge_get_cls_key_ip(flow, rule);
7319 	hclge_get_cls_key_port(flow, rule);
7320 
7321 	return 0;
7322 }
7323 
7324 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7325 				  struct flow_cls_offload *cls_flower, int tc)
7326 {
7327 	u32 prio = cls_flower->common.prio;
7328 
7329 	if (tc < 0 || tc > hdev->tc_max) {
7330 		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7331 		return -EINVAL;
7332 	}
7333 
7334 	if (prio == 0 ||
7335 	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7336 		dev_err(&hdev->pdev->dev,
7337 			"prio %u should be in range[1, %u]\n",
7338 			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7339 		return -EINVAL;
7340 	}
7341 
7342 	if (test_bit(prio - 1, hdev->fd_bmap)) {
7343 		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7344 		return -EINVAL;
7345 	}
7346 	return 0;
7347 }
7348 
7349 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7350 				struct flow_cls_offload *cls_flower,
7351 				int tc)
7352 {
7353 	struct hclge_vport *vport = hclge_get_vport(handle);
7354 	struct hclge_dev *hdev = vport->back;
7355 	struct hclge_fd_rule *rule;
7356 	int ret;
7357 
7358 	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7359 	if (ret) {
7360 		dev_err(&hdev->pdev->dev,
7361 			"failed to check cls flower params, ret = %d\n", ret);
7362 		return ret;
7363 	}
7364 
7365 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7366 	if (!rule)
7367 		return -ENOMEM;
7368 
7369 	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7370 	if (ret) {
7371 		kfree(rule);
7372 		return ret;
7373 	}
7374 
7375 	rule->action = HCLGE_FD_ACTION_SELECT_TC;
7376 	rule->cls_flower.tc = tc;
7377 	rule->location = cls_flower->common.prio - 1;
7378 	rule->vf_id = 0;
7379 	rule->cls_flower.cookie = cls_flower->cookie;
7380 	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7381 
7382 	ret = hclge_add_fd_entry_common(hdev, rule);
7383 	if (ret)
7384 		kfree(rule);
7385 
7386 	return ret;
7387 }
7388 
7389 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7390 						   unsigned long cookie)
7391 {
7392 	struct hclge_fd_rule *rule;
7393 	struct hlist_node *node;
7394 
7395 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7396 		if (rule->cls_flower.cookie == cookie)
7397 			return rule;
7398 	}
7399 
7400 	return NULL;
7401 }
7402 
7403 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7404 				struct flow_cls_offload *cls_flower)
7405 {
7406 	struct hclge_vport *vport = hclge_get_vport(handle);
7407 	struct hclge_dev *hdev = vport->back;
7408 	struct hclge_fd_rule *rule;
7409 	int ret;
7410 
7411 	spin_lock_bh(&hdev->fd_rule_lock);
7412 
7413 	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7414 	if (!rule) {
7415 		spin_unlock_bh(&hdev->fd_rule_lock);
7416 		return -EINVAL;
7417 	}
7418 
7419 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7420 				   NULL, false);
7421 	if (ret) {
7422 		spin_unlock_bh(&hdev->fd_rule_lock);
7423 		return ret;
7424 	}
7425 
7426 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7427 	spin_unlock_bh(&hdev->fd_rule_lock);
7428 
7429 	return 0;
7430 }
7431 
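/* Called via hclge_sync_fd_table to push deferred rule changes to
 * hardware: rules in TO_ADD state are written and marked active, rules
 * in TO_DEL state are removed from the TCAM and freed. On any failure
 * the FD_TBL_CHANGED bit is set again so a later sync retries.
 */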
7432 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7433 {
7434 	struct hclge_fd_rule *rule;
7435 	struct hlist_node *node;
7436 	int ret = 0;
7437 
7438 	if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7439 		return;
7440 
7441 	spin_lock_bh(&hdev->fd_rule_lock);
7442 
7443 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7444 		switch (rule->state) {
7445 		case HCLGE_FD_TO_ADD:
7446 			ret = hclge_fd_config_rule(hdev, rule);
7447 			if (ret)
7448 				goto out;
7449 			rule->state = HCLGE_FD_ACTIVE;
7450 			break;
7451 		case HCLGE_FD_TO_DEL:
7452 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7453 						   rule->location, NULL, false);
7454 			if (ret)
7455 				goto out;
7456 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7457 			hclge_fd_free_node(hdev, rule);
7458 			break;
7459 		default:
7460 			break;
7461 		}
7462 	}
7463 
7464 out:
7465 	if (ret)
7466 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7467 
7468 	spin_unlock_bh(&hdev->fd_rule_lock);
7469 }
7470 
7471 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7472 {
7473 	if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7474 		bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7475 
7476 		hclge_clear_fd_rules_in_list(hdev, clear_list);
7477 	}
7478 
7479 	hclge_sync_fd_user_def_cfg(hdev, false);
7480 
7481 	hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7482 }
7483 
7484 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7485 {
7486 	struct hclge_vport *vport = hclge_get_vport(handle);
7487 	struct hclge_dev *hdev = vport->back;
7488 
7489 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7490 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7491 }
7492 
7493 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7494 {
7495 	struct hclge_vport *vport = hclge_get_vport(handle);
7496 	struct hclge_dev *hdev = vport->back;
7497 
7498 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7499 }
7500 
7501 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7502 {
7503 	struct hclge_vport *vport = hclge_get_vport(handle);
7504 	struct hclge_dev *hdev = vport->back;
7505 
7506 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7507 }
7508 
7509 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7510 {
7511 	struct hclge_vport *vport = hclge_get_vport(handle);
7512 	struct hclge_dev *hdev = vport->back;
7513 
7514 	return hdev->rst_stats.hw_reset_done_cnt;
7515 }
7516 
7517 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7518 {
7519 	struct hclge_vport *vport = hclge_get_vport(handle);
7520 	struct hclge_dev *hdev = vport->back;
7521 
7522 	hdev->fd_en = enable;
7523 
7524 	if (!enable)
7525 		set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7526 	else
7527 		hclge_restore_fd_entries(handle);
7528 
7529 	hclge_task_schedule(hdev, 0);
7530 }
7531 
7532 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7533 {
7534 	struct hclge_desc desc;
7535 	struct hclge_config_mac_mode_cmd *req =
7536 		(struct hclge_config_mac_mode_cmd *)desc.data;
7537 	u32 loop_en = 0;
7538 	int ret;
7539 
7540 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7541 
7542 	if (enable) {
7543 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7544 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7545 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7546 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7547 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7548 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7549 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7550 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7551 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7552 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7553 	}
7554 
7555 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7556 
7557 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7558 	if (ret)
7559 		dev_err(&hdev->pdev->dev,
7560 			"mac enable fail, ret =%d.\n", ret);
7561 }
7562 
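/* Read-modify-write the MAC VLAN switch parameter for the function
 * derived from vfid: the current value is read first, combined with
 * switch_param and param_mask, then written back together with the mask.
 */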
7563 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7564 				     u8 switch_param, u8 param_mask)
7565 {
7566 	struct hclge_mac_vlan_switch_cmd *req;
7567 	struct hclge_desc desc;
7568 	u32 func_id;
7569 	int ret;
7570 
7571 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7572 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7573 
7574 	/* read current config parameter */
7575 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7576 				   true);
7577 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7578 	req->func_id = cpu_to_le32(func_id);
7579 
7580 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7581 	if (ret) {
7582 		dev_err(&hdev->pdev->dev,
7583 			"read mac vlan switch parameter fail, ret = %d\n", ret);
7584 		return ret;
7585 	}
7586 
7587 	/* modify and write new config parameter */
7588 	hclge_cmd_reuse_desc(&desc, false);
7589 	req->switch_param = (req->switch_param & param_mask) | switch_param;
7590 	req->param_mask = param_mask;
7591 
7592 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7593 	if (ret)
7594 		dev_err(&hdev->pdev->dev,
7595 			"set mac vlan switch parameter fail, ret = %d\n", ret);
7596 	return ret;
7597 }
7598 
7599 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7600 				       int link_ret)
7601 {
7602 #define HCLGE_PHY_LINK_STATUS_NUM  200
7603 
7604 	struct phy_device *phydev = hdev->hw.mac.phydev;
7605 	int i = 0;
7606 	int ret;
7607 
7608 	do {
7609 		ret = phy_read_status(phydev);
7610 		if (ret) {
7611 			dev_err(&hdev->pdev->dev,
7612 				"phy update link status fail, ret = %d\n", ret);
7613 			return;
7614 		}
7615 
7616 		if (phydev->link == link_ret)
7617 			break;
7618 
7619 		msleep(HCLGE_LINK_STATUS_MS);
7620 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7621 }
7622 
7623 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7624 {
7625 #define HCLGE_MAC_LINK_STATUS_NUM  100
7626 
7627 	int link_status;
7628 	int i = 0;
7629 	int ret;
7630 
7631 	do {
7632 		ret = hclge_get_mac_link_status(hdev, &link_status);
7633 		if (ret)
7634 			return ret;
7635 		if (link_status == link_ret)
7636 			return 0;
7637 
7638 		msleep(HCLGE_LINK_STATUS_MS);
7639 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7640 	return -EBUSY;
7641 }
7642 
7643 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7644 					  bool is_phy)
7645 {
7646 	int link_ret;
7647 
7648 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7649 
7650 	if (is_phy)
7651 		hclge_phy_link_status_wait(hdev, link_ret);
7652 
7653 	return hclge_mac_link_status_wait(hdev, link_ret);
7654 }
7655 
7656 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7657 {
7658 	struct hclge_config_mac_mode_cmd *req;
7659 	struct hclge_desc desc;
7660 	u32 loop_en;
7661 	int ret;
7662 
7663 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7664 	/* 1 Read out the MAC mode config at first */
7665 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7666 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7667 	if (ret) {
7668 		dev_err(&hdev->pdev->dev,
7669 			"mac loopback get fail, ret =%d.\n", ret);
7670 		return ret;
7671 	}
7672 
7673 	/* 2 Then setup the loopback flag */
7674 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7675 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7676 
7677 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7678 
7679 	/* 3 Config mac work mode with loopback flag
7680 	 * and its original configuration parameters
7681 	 */
7682 	hclge_cmd_reuse_desc(&desc, false);
7683 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7684 	if (ret)
7685 		dev_err(&hdev->pdev->dev,
7686 			"mac loopback set fail, ret =%d.\n", ret);
7687 	return ret;
7688 }
7689 
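/* Configure serdes/PHY loopback through the HCLGE_OPC_COMMON_LOOPBACK
 * command, then poll the same command for the DONE bit (up to
 * HCLGE_COMMON_LB_RETRY_NUM reads, HCLGE_COMMON_LB_RETRY_MS apart) and
 * check the SUCCESS bit of the result.
 */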
7690 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7691 				     enum hnae3_loop loop_mode)
7692 {
7693 #define HCLGE_COMMON_LB_RETRY_MS	10
7694 #define HCLGE_COMMON_LB_RETRY_NUM	100
7695 
7696 	struct hclge_common_lb_cmd *req;
7697 	struct hclge_desc desc;
7698 	int ret, i = 0;
7699 	u8 loop_mode_b;
7700 
7701 	req = (struct hclge_common_lb_cmd *)desc.data;
7702 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7703 
7704 	switch (loop_mode) {
7705 	case HNAE3_LOOP_SERIAL_SERDES:
7706 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7707 		break;
7708 	case HNAE3_LOOP_PARALLEL_SERDES:
7709 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7710 		break;
7711 	case HNAE3_LOOP_PHY:
7712 		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7713 		break;
7714 	default:
7715 		dev_err(&hdev->pdev->dev,
7716 			"unsupported common loopback mode %d\n", loop_mode);
7717 		return -ENOTSUPP;
7718 	}
7719 
7720 	if (en) {
7721 		req->enable = loop_mode_b;
7722 		req->mask = loop_mode_b;
7723 	} else {
7724 		req->mask = loop_mode_b;
7725 	}
7726 
7727 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7728 	if (ret) {
7729 		dev_err(&hdev->pdev->dev,
7730 			"common loopback set fail, ret = %d\n", ret);
7731 		return ret;
7732 	}
7733 
7734 	do {
7735 		msleep(HCLGE_COMMON_LB_RETRY_MS);
7736 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7737 					   true);
7738 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7739 		if (ret) {
7740 			dev_err(&hdev->pdev->dev,
7741 				"common loopback get, ret = %d\n", ret);
7742 			return ret;
7743 		}
7744 	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7745 		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7746 
7747 	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7748 		dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7749 		return -EBUSY;
7750 	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7751 		dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7752 		return -EIO;
7753 	}
7754 	return ret;
7755 }
7756 
7757 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7758 				     enum hnae3_loop loop_mode)
7759 {
7760 	int ret;
7761 
7762 	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7763 	if (ret)
7764 		return ret;
7765 
7766 	hclge_cfg_mac_mode(hdev, en);
7767 
7768 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7769 	if (ret)
7770 		dev_err(&hdev->pdev->dev,
7771 			"serdes loopback config mac mode timeout\n");
7772 
7773 	return ret;
7774 }
7775 
7776 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7777 				     struct phy_device *phydev)
7778 {
7779 	int ret;
7780 
7781 	if (!phydev->suspended) {
7782 		ret = phy_suspend(phydev);
7783 		if (ret)
7784 			return ret;
7785 	}
7786 
7787 	ret = phy_resume(phydev);
7788 	if (ret)
7789 		return ret;
7790 
7791 	return phy_loopback(phydev, true);
7792 }
7793 
7794 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7795 				      struct phy_device *phydev)
7796 {
7797 	int ret;
7798 
7799 	ret = phy_loopback(phydev, false);
7800 	if (ret)
7801 		return ret;
7802 
7803 	return phy_suspend(phydev);
7804 }
7805 
7806 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7807 {
7808 	struct phy_device *phydev = hdev->hw.mac.phydev;
7809 	int ret;
7810 
7811 	if (!phydev) {
7812 		if (hnae3_dev_phy_imp_supported(hdev))
7813 			return hclge_set_common_loopback(hdev, en,
7814 							 HNAE3_LOOP_PHY);
7815 		return -ENOTSUPP;
7816 	}
7817 
7818 	if (en)
7819 		ret = hclge_enable_phy_loopback(hdev, phydev);
7820 	else
7821 		ret = hclge_disable_phy_loopback(hdev, phydev);
7822 	if (ret) {
7823 		dev_err(&hdev->pdev->dev,
7824 			"set phy loopback fail, ret = %d\n", ret);
7825 		return ret;
7826 	}
7827 
7828 	hclge_cfg_mac_mode(hdev, en);
7829 
7830 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7831 	if (ret)
7832 		dev_err(&hdev->pdev->dev,
7833 			"phy loopback config mac mode timeout\n");
7834 
7835 	return ret;
7836 }
7837 
7838 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7839 				     u16 stream_id, bool enable)
7840 {
7841 	struct hclge_desc desc;
7842 	struct hclge_cfg_com_tqp_queue_cmd *req =
7843 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7844 
7845 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7846 	req->tqp_id = cpu_to_le16(tqp_id);
7847 	req->stream_id = cpu_to_le16(stream_id);
7848 	if (enable)
7849 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7850 
7851 	return hclge_cmd_send(&hdev->hw, &desc, 1);
7852 }
7853 
7854 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7855 {
7856 	struct hclge_vport *vport = hclge_get_vport(handle);
7857 	struct hclge_dev *hdev = vport->back;
7858 	int ret;
7859 	u16 i;
7860 
7861 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
7862 		ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7863 		if (ret)
7864 			return ret;
7865 	}
7866 	return 0;
7867 }
7868 
7869 static int hclge_set_loopback(struct hnae3_handle *handle,
7870 			      enum hnae3_loop loop_mode, bool en)
7871 {
7872 	struct hclge_vport *vport = hclge_get_vport(handle);
7873 	struct hclge_dev *hdev = vport->back;
7874 	int ret;
7875 
7876 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7877 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7878 	 * the same, the packets are looped back in the SSU. If SSU loopback
7879 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7880 	 */
7881 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7882 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7883 
7884 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7885 						HCLGE_SWITCH_ALW_LPBK_MASK);
7886 		if (ret)
7887 			return ret;
7888 	}
7889 
7890 	switch (loop_mode) {
7891 	case HNAE3_LOOP_APP:
7892 		ret = hclge_set_app_loopback(hdev, en);
7893 		break;
7894 	case HNAE3_LOOP_SERIAL_SERDES:
7895 	case HNAE3_LOOP_PARALLEL_SERDES:
7896 		ret = hclge_set_common_loopback(hdev, en, loop_mode);
7897 		break;
7898 	case HNAE3_LOOP_PHY:
7899 		ret = hclge_set_phy_loopback(hdev, en);
7900 		break;
7901 	default:
7902 		ret = -ENOTSUPP;
7903 		dev_err(&hdev->pdev->dev,
7904 			"loop_mode %d is not supported\n", loop_mode);
7905 		break;
7906 	}
7907 
7908 	if (ret)
7909 		return ret;
7910 
7911 	ret = hclge_tqp_enable(handle, en);
7912 	if (ret)
7913 		dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
7914 			en ? "enable" : "disable", ret);
7915 
7916 	return ret;
7917 }
7918 
7919 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7920 {
7921 	int ret;
7922 
7923 	ret = hclge_set_app_loopback(hdev, false);
7924 	if (ret)
7925 		return ret;
7926 
7927 	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7928 	if (ret)
7929 		return ret;
7930 
7931 	return hclge_cfg_common_loopback(hdev, false,
7932 					 HNAE3_LOOP_PARALLEL_SERDES);
7933 }
7934 
7935 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7936 {
7937 	struct hclge_vport *vport = hclge_get_vport(handle);
7938 	struct hnae3_knic_private_info *kinfo;
7939 	struct hnae3_queue *queue;
7940 	struct hclge_tqp *tqp;
7941 	int i;
7942 
7943 	kinfo = &vport->nic.kinfo;
7944 	for (i = 0; i < kinfo->num_tqps; i++) {
7945 		queue = handle->kinfo.tqp[i];
7946 		tqp = container_of(queue, struct hclge_tqp, q);
7947 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7948 	}
7949 }
7950 
7951 static void hclge_flush_link_update(struct hclge_dev *hdev)
7952 {
7953 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
7954 
7955 	unsigned long last = hdev->serv_processed_cnt;
7956 	int i = 0;
7957 
7958 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7959 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7960 	       last == hdev->serv_processed_cnt)
7961 		usleep_range(1, 1);
7962 }
7963 
7964 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7965 {
7966 	struct hclge_vport *vport = hclge_get_vport(handle);
7967 	struct hclge_dev *hdev = vport->back;
7968 
7969 	if (enable) {
7970 		hclge_task_schedule(hdev, 0);
7971 	} else {
7972 		/* Set the DOWN flag here to disable link updating */
7973 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
7974 
7975 		/* flush memory to make sure DOWN is seen by service task */
7976 		smp_mb__before_atomic();
7977 		hclge_flush_link_update(hdev);
7978 	}
7979 }
7980 
7981 static int hclge_ae_start(struct hnae3_handle *handle)
7982 {
7983 	struct hclge_vport *vport = hclge_get_vport(handle);
7984 	struct hclge_dev *hdev = vport->back;
7985 
7986 	/* mac enable */
7987 	hclge_cfg_mac_mode(hdev, true);
7988 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7989 	hdev->hw.mac.link = 0;
7990 
7991 	/* reset tqp stats */
7992 	hclge_reset_tqp_stats(handle);
7993 
7994 	hclge_mac_start_phy(hdev);
7995 
7996 	return 0;
7997 }
7998 
7999 static void hclge_ae_stop(struct hnae3_handle *handle)
8000 {
8001 	struct hclge_vport *vport = hclge_get_vport(handle);
8002 	struct hclge_dev *hdev = vport->back;
8003 
8004 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8005 	spin_lock_bh(&hdev->fd_rule_lock);
8006 	hclge_clear_arfs_rules(hdev);
8007 	spin_unlock_bh(&hdev->fd_rule_lock);
8008 
8009 	/* If it is not a PF reset, the firmware will disable the MAC,
8010 	 * so we only need to stop the PHY here.
8011 	 */
8012 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8013 	    hdev->reset_type != HNAE3_FUNC_RESET) {
8014 		hclge_mac_stop_phy(hdev);
8015 		hclge_update_link_status(hdev);
8016 		return;
8017 	}
8018 
8019 	hclge_reset_tqp(handle);
8020 
8021 	hclge_config_mac_tnl_int(hdev, false);
8022 
8023 	/* Mac disable */
8024 	hclge_cfg_mac_mode(hdev, false);
8025 
8026 	hclge_mac_stop_phy(hdev);
8027 
8028 	/* reset tqp stats */
8029 	hclge_reset_tqp_stats(handle);
8030 	hclge_update_link_status(hdev);
8031 }
8032 
8033 int hclge_vport_start(struct hclge_vport *vport)
8034 {
8035 	struct hclge_dev *hdev = vport->back;
8036 
8037 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8038 	vport->last_active_jiffies = jiffies;
8039 
8040 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8041 		if (vport->vport_id) {
8042 			hclge_restore_mac_table_common(vport);
8043 			hclge_restore_vport_vlan_table(vport);
8044 		} else {
8045 			hclge_restore_hw_table(hdev);
8046 		}
8047 	}
8048 
8049 	clear_bit(vport->vport_id, hdev->vport_config_block);
8050 
8051 	return 0;
8052 }
8053 
8054 void hclge_vport_stop(struct hclge_vport *vport)
8055 {
8056 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8057 }
8058 
8059 static int hclge_client_start(struct hnae3_handle *handle)
8060 {
8061 	struct hclge_vport *vport = hclge_get_vport(handle);
8062 
8063 	return hclge_vport_start(vport);
8064 }
8065 
8066 static void hclge_client_stop(struct hnae3_handle *handle)
8067 {
8068 	struct hclge_vport *vport = hclge_get_vport(handle);
8069 
8070 	hclge_vport_stop(vport);
8071 }
8072 
8073 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8074 					 u16 cmdq_resp, u8  resp_code,
8075 					 enum hclge_mac_vlan_tbl_opcode op)
8076 {
8077 	struct hclge_dev *hdev = vport->back;
8078 
8079 	if (cmdq_resp) {
8080 		dev_err(&hdev->pdev->dev,
8081 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8082 			cmdq_resp);
8083 		return -EIO;
8084 	}
8085 
8086 	if (op == HCLGE_MAC_VLAN_ADD) {
8087 		if (!resp_code || resp_code == 1)
8088 			return 0;
8089 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8090 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
8091 			return -ENOSPC;
8092 
8093 		dev_err(&hdev->pdev->dev,
8094 			"add mac addr failed for undefined, code=%u.\n",
8095 			resp_code);
8096 		return -EIO;
8097 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
8098 		if (!resp_code) {
8099 			return 0;
8100 		} else if (resp_code == 1) {
8101 			dev_dbg(&hdev->pdev->dev,
8102 				"remove mac addr failed for miss.\n");
8103 			return -ENOENT;
8104 		}
8105 
8106 		dev_err(&hdev->pdev->dev,
8107 			"remove mac addr failed for undefined, code=%u.\n",
8108 			resp_code);
8109 		return -EIO;
8110 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
8111 		if (!resp_code) {
8112 			return 0;
8113 		} else if (resp_code == 1) {
8114 			dev_dbg(&hdev->pdev->dev,
8115 				"lookup mac addr failed for miss.\n");
8116 			return -ENOENT;
8117 		}
8118 
8119 		dev_err(&hdev->pdev->dev,
8120 			"lookup mac addr failed for undefined, code=%u.\n",
8121 			resp_code);
8122 		return -EIO;
8123 	}
8124 
8125 	dev_err(&hdev->pdev->dev,
8126 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8127 
8128 	return -EINVAL;
8129 }
8130 
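/* Set or clear one function id bit in a multi-descriptor MAC VLAN table
 * command: the first 192 function ids live in desc[1], the remainder in
 * desc[2], 32 ids per data word.
 */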
8131 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8132 {
8133 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8134 
8135 	unsigned int word_num;
8136 	unsigned int bit_num;
8137 
8138 	if (vfid > 255 || vfid < 0)
8139 		return -EIO;
8140 
8141 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8142 		word_num = vfid / 32;
8143 		bit_num  = vfid % 32;
8144 		if (clr)
8145 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8146 		else
8147 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8148 	} else {
8149 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8150 		bit_num  = vfid % 32;
8151 		if (clr)
8152 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8153 		else
8154 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8155 	}
8156 
8157 	return 0;
8158 }
8159 
8160 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8161 {
8162 #define HCLGE_DESC_NUMBER 3
8163 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8164 	int i, j;
8165 
8166 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8167 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8168 			if (desc[i].data[j])
8169 				return false;
8170 
8171 	return true;
8172 }
8173 
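/* Pack a 6-byte MAC address into the table entry: bytes 0..3 go into
 * mac_addr_hi32 (byte 0 in the lowest 8 bits) and bytes 4..5 into
 * mac_addr_lo16, both stored as little-endian fields. For example,
 * 00:11:22:33:44:55 becomes hi32 0x33221100 and lo16 0x5544.
 */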
8174 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8175 				   const u8 *addr, bool is_mc)
8176 {
8177 	const unsigned char *mac_addr = addr;
8178 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8179 		       (mac_addr[0]) | (mac_addr[1] << 8);
8180 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8181 
8182 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8183 	if (is_mc) {
8184 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8185 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8186 	}
8187 
8188 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8189 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8190 }
8191 
8192 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8193 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
8194 {
8195 	struct hclge_dev *hdev = vport->back;
8196 	struct hclge_desc desc;
8197 	u8 resp_code;
8198 	u16 retval;
8199 	int ret;
8200 
8201 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8202 
8203 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8204 
8205 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8206 	if (ret) {
8207 		dev_err(&hdev->pdev->dev,
8208 			"del mac addr failed for cmd_send, ret =%d.\n",
8209 			ret);
8210 		return ret;
8211 	}
8212 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8213 	retval = le16_to_cpu(desc.retval);
8214 
8215 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8216 					     HCLGE_MAC_VLAN_REMOVE);
8217 }
8218 
8219 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8220 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
8221 				     struct hclge_desc *desc,
8222 				     bool is_mc)
8223 {
8224 	struct hclge_dev *hdev = vport->back;
8225 	u8 resp_code;
8226 	u16 retval;
8227 	int ret;
8228 
8229 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8230 	if (is_mc) {
8231 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8232 		memcpy(desc[0].data,
8233 		       req,
8234 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8235 		hclge_cmd_setup_basic_desc(&desc[1],
8236 					   HCLGE_OPC_MAC_VLAN_ADD,
8237 					   true);
8238 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8239 		hclge_cmd_setup_basic_desc(&desc[2],
8240 					   HCLGE_OPC_MAC_VLAN_ADD,
8241 					   true);
8242 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
8243 	} else {
8244 		memcpy(desc[0].data,
8245 		       req,
8246 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8247 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
8248 	}
8249 	if (ret) {
8250 		dev_err(&hdev->pdev->dev,
8251 			"lookup mac addr failed for cmd_send, ret =%d.\n",
8252 			ret);
8253 		return ret;
8254 	}
8255 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8256 	retval = le16_to_cpu(desc[0].retval);
8257 
8258 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8259 					     HCLGE_MAC_VLAN_LKUP);
8260 }
8261 
8262 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8263 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
8264 				  struct hclge_desc *mc_desc)
8265 {
8266 	struct hclge_dev *hdev = vport->back;
8267 	int cfg_status;
8268 	u8 resp_code;
8269 	u16 retval;
8270 	int ret;
8271 
8272 	if (!mc_desc) {
8273 		struct hclge_desc desc;
8274 
8275 		hclge_cmd_setup_basic_desc(&desc,
8276 					   HCLGE_OPC_MAC_VLAN_ADD,
8277 					   false);
8278 		memcpy(desc.data, req,
8279 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8280 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8281 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8282 		retval = le16_to_cpu(desc.retval);
8283 
8284 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8285 							   resp_code,
8286 							   HCLGE_MAC_VLAN_ADD);
8287 	} else {
8288 		hclge_cmd_reuse_desc(&mc_desc[0], false);
8289 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8290 		hclge_cmd_reuse_desc(&mc_desc[1], false);
8291 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8292 		hclge_cmd_reuse_desc(&mc_desc[2], false);
8293 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8294 		memcpy(mc_desc[0].data, req,
8295 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8296 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8297 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8298 		retval = le16_to_cpu(mc_desc[0].retval);
8299 
8300 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8301 							   resp_code,
8302 							   HCLGE_MAC_VLAN_ADD);
8303 	}
8304 
8305 	if (ret) {
8306 		dev_err(&hdev->pdev->dev,
8307 			"add mac addr failed for cmd_send, ret =%d.\n",
8308 			ret);
8309 		return ret;
8310 	}
8311 
8312 	return cfg_status;
8313 }
8314 
8315 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8316 			       u16 *allocated_size)
8317 {
8318 	struct hclge_umv_spc_alc_cmd *req;
8319 	struct hclge_desc desc;
8320 	int ret;
8321 
8322 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8323 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8324 
8325 	req->space_size = cpu_to_le32(space_size);
8326 
8327 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8328 	if (ret) {
8329 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8330 			ret);
8331 		return ret;
8332 	}
8333 
8334 	*allocated_size = le32_to_cpu(desc.data[1]);
8335 
8336 	return 0;
8337 }
8338 
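/* Request the unicast MAC (UMV) table space this PF wants and split what the
 * device actually grants: each vport gets an equal private quota
 * (max_umv_size / (num_alloc_vport + 1)), and one extra quota plus the
 * remainder forms the shared pool used once a vport exhausts its private part.
 */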
8339 static int hclge_init_umv_space(struct hclge_dev *hdev)
8340 {
8341 	u16 allocated_size = 0;
8342 	int ret;
8343 
8344 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8345 	if (ret)
8346 		return ret;
8347 
8348 	if (allocated_size < hdev->wanted_umv_size)
8349 		dev_warn(&hdev->pdev->dev,
8350 			 "failed to alloc umv space, want %u, get %u\n",
8351 			 hdev->wanted_umv_size, allocated_size);
8352 
8353 	hdev->max_umv_size = allocated_size;
8354 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8355 	hdev->share_umv_size = hdev->priv_umv_size +
8356 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8357 
8358 	return 0;
8359 }
8360 
8361 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8362 {
8363 	struct hclge_vport *vport;
8364 	int i;
8365 
8366 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8367 		vport = &hdev->vport[i];
8368 		vport->used_umv_num = 0;
8369 	}
8370 
8371 	mutex_lock(&hdev->vport_lock);
8372 	hdev->share_umv_size = hdev->priv_umv_size +
8373 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8374 	mutex_unlock(&hdev->vport_lock);
8375 }
8376 
8377 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8378 {
8379 	struct hclge_dev *hdev = vport->back;
8380 	bool is_full;
8381 
8382 	if (need_lock)
8383 		mutex_lock(&hdev->vport_lock);
8384 
8385 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8386 		   hdev->share_umv_size == 0);
8387 
8388 	if (need_lock)
8389 		mutex_unlock(&hdev->vport_lock);
8390 
8391 	return is_full;
8392 }
8393 
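/* Account for one unicast MAC entry being added (is_free == false) or freed
 * (is_free == true): a vport consumes its private UMV quota first and only
 * then draws from the shared pool, and frees are returned in reverse order.
 */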
8394 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8395 {
8396 	struct hclge_dev *hdev = vport->back;
8397 
8398 	if (is_free) {
8399 		if (vport->used_umv_num > hdev->priv_umv_size)
8400 			hdev->share_umv_size++;
8401 
8402 		if (vport->used_umv_num > 0)
8403 			vport->used_umv_num--;
8404 	} else {
8405 		if (vport->used_umv_num >= hdev->priv_umv_size &&
8406 		    hdev->share_umv_size > 0)
8407 			hdev->share_umv_size--;
8408 		vport->used_umv_num++;
8409 	}
8410 }
8411 
8412 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8413 						  const u8 *mac_addr)
8414 {
8415 	struct hclge_mac_node *mac_node, *tmp;
8416 
8417 	list_for_each_entry_safe(mac_node, tmp, list, node)
8418 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8419 			return mac_node;
8420 
8421 	return NULL;
8422 }
8423 
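/* Merge a new request into an existing mac node. The state transitions are:
 * a TO_ADD node hit by a DEL request is dropped outright, a TO_DEL node hit
 * by an ADD request goes back to ACTIVE, and a node confirmed from
 * tmp_add_list moves from TO_ADD to ACTIVE.
 */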
8424 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8425 				  enum HCLGE_MAC_NODE_STATE state)
8426 {
8427 	switch (state) {
8428 	/* from set_rx_mode or tmp_add_list */
8429 	case HCLGE_MAC_TO_ADD:
8430 		if (mac_node->state == HCLGE_MAC_TO_DEL)
8431 			mac_node->state = HCLGE_MAC_ACTIVE;
8432 		break;
8433 	/* only from set_rx_mode */
8434 	case HCLGE_MAC_TO_DEL:
8435 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
8436 			list_del(&mac_node->node);
8437 			kfree(mac_node);
8438 		} else {
8439 			mac_node->state = HCLGE_MAC_TO_DEL;
8440 		}
8441 		break;
8442 	/* only from tmp_add_list, the mac_node->state won't be
8443 	 * ACTIVE.
8444 	 */
8445 	case HCLGE_MAC_ACTIVE:
8446 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8447 			mac_node->state = HCLGE_MAC_ACTIVE;
8448 
8449 		break;
8450 	}
8451 }
8452 
8453 int hclge_update_mac_list(struct hclge_vport *vport,
8454 			  enum HCLGE_MAC_NODE_STATE state,
8455 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
8456 			  const unsigned char *addr)
8457 {
8458 	struct hclge_dev *hdev = vport->back;
8459 	struct hclge_mac_node *mac_node;
8460 	struct list_head *list;
8461 
8462 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8463 		&vport->uc_mac_list : &vport->mc_mac_list;
8464 
8465 	spin_lock_bh(&vport->mac_list_lock);
8466 
8467 	/* if the mac addr is already in the mac list, no need to add a new
8468 	 * entry; just update the existing node's state (convert it to a new
8469 	 * state, remove it, or do nothing).
8470 	 */
8471 	mac_node = hclge_find_mac_node(list, addr);
8472 	if (mac_node) {
8473 		hclge_update_mac_node(mac_node, state);
8474 		spin_unlock_bh(&vport->mac_list_lock);
8475 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8476 		return 0;
8477 	}
8478 
8479 	/* if this address was never added, there is nothing to delete */
8480 	if (state == HCLGE_MAC_TO_DEL) {
8481 		spin_unlock_bh(&vport->mac_list_lock);
8482 		dev_err(&hdev->pdev->dev,
8483 			"failed to delete address %pM from mac list\n",
8484 			addr);
8485 		return -ENOENT;
8486 	}
8487 
8488 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8489 	if (!mac_node) {
8490 		spin_unlock_bh(&vport->mac_list_lock);
8491 		return -ENOMEM;
8492 	}
8493 
8494 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8495 
8496 	mac_node->state = state;
8497 	ether_addr_copy(mac_node->mac_addr, addr);
8498 	list_add_tail(&mac_node->node, list);
8499 
8500 	spin_unlock_bh(&vport->mac_list_lock);
8501 
8502 	return 0;
8503 }
8504 
8505 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8506 			     const unsigned char *addr)
8507 {
8508 	struct hclge_vport *vport = hclge_get_vport(handle);
8509 
8510 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8511 				     addr);
8512 }
8513 
8514 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8515 			     const unsigned char *addr)
8516 {
8517 	struct hclge_dev *hdev = vport->back;
8518 	struct hclge_mac_vlan_tbl_entry_cmd req;
8519 	struct hclge_desc desc;
8520 	u16 egress_port = 0;
8521 	int ret;
8522 
8523 	/* mac addr check */
8524 	if (is_zero_ether_addr(addr) ||
8525 	    is_broadcast_ether_addr(addr) ||
8526 	    is_multicast_ether_addr(addr)) {
8527 		dev_err(&hdev->pdev->dev,
8528 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8529 			 addr, is_zero_ether_addr(addr),
8530 			 is_broadcast_ether_addr(addr),
8531 			 is_multicast_ether_addr(addr));
8532 		return -EINVAL;
8533 	}
8534 
8535 	memset(&req, 0, sizeof(req));
8536 
8537 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8538 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8539 
8540 	req.egress_port = cpu_to_le16(egress_port);
8541 
8542 	hclge_prepare_mac_addr(&req, addr, false);
8543 
8544 	/* Lookup the mac address in the mac_vlan table, and add
8545 	 * it if the entry does not exist. Duplicate unicast entries
8546 	 * are not allowed in the mac_vlan table.
8547 	 */
8548 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8549 	if (ret == -ENOENT) {
8550 		mutex_lock(&hdev->vport_lock);
8551 		if (!hclge_is_umv_space_full(vport, false)) {
8552 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8553 			if (!ret)
8554 				hclge_update_umv_space(vport, false);
8555 			mutex_unlock(&hdev->vport_lock);
8556 			return ret;
8557 		}
8558 		mutex_unlock(&hdev->vport_lock);
8559 
8560 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8561 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8562 				hdev->priv_umv_size);
8563 
8564 		return -ENOSPC;
8565 	}
8566 
8567 	/* check if we just hit the duplicate */
8568 	if (!ret) {
8569 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8570 			 vport->vport_id, addr);
8571 		return 0;
8572 	}
8573 
8574 	dev_err(&hdev->pdev->dev,
8575 		"PF failed to add unicast entry(%pM) in the MAC table\n",
8576 		addr);
8577 
8578 	return ret;
8579 }
8580 
8581 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8582 			    const unsigned char *addr)
8583 {
8584 	struct hclge_vport *vport = hclge_get_vport(handle);
8585 
8586 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8587 				     addr);
8588 }
8589 
8590 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8591 			    const unsigned char *addr)
8592 {
8593 	struct hclge_dev *hdev = vport->back;
8594 	struct hclge_mac_vlan_tbl_entry_cmd req;
8595 	int ret;
8596 
8597 	/* mac addr check */
8598 	if (is_zero_ether_addr(addr) ||
8599 	    is_broadcast_ether_addr(addr) ||
8600 	    is_multicast_ether_addr(addr)) {
8601 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8602 			addr);
8603 		return -EINVAL;
8604 	}
8605 
8606 	memset(&req, 0, sizeof(req));
8607 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8608 	hclge_prepare_mac_addr(&req, addr, false);
8609 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
8610 	if (!ret) {
8611 		mutex_lock(&hdev->vport_lock);
8612 		hclge_update_umv_space(vport, true);
8613 		mutex_unlock(&hdev->vport_lock);
8614 	} else if (ret == -ENOENT) {
8615 		ret = 0;
8616 	}
8617 
8618 	return ret;
8619 }
8620 
8621 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8622 			     const unsigned char *addr)
8623 {
8624 	struct hclge_vport *vport = hclge_get_vport(handle);
8625 
8626 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8627 				     addr);
8628 }
8629 
8630 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8631 			     const unsigned char *addr)
8632 {
8633 	struct hclge_dev *hdev = vport->back;
8634 	struct hclge_mac_vlan_tbl_entry_cmd req;
8635 	struct hclge_desc desc[3];
8636 	int status;
8637 
8638 	/* mac addr check */
8639 	if (!is_multicast_ether_addr(addr)) {
8640 		dev_err(&hdev->pdev->dev,
8641 			"Add mc mac err! invalid mac:%pM.\n",
8642 			 addr);
8643 		return -EINVAL;
8644 	}
8645 	memset(&req, 0, sizeof(req));
8646 	hclge_prepare_mac_addr(&req, addr, true);
8647 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8648 	if (status) {
8649 		/* This mac addr does not exist, add a new entry for it */
8650 		memset(desc[0].data, 0, sizeof(desc[0].data));
8651 		memset(desc[1].data, 0, sizeof(desc[0].data));
8652 		memset(desc[2].data, 0, sizeof(desc[0].data));
8653 	}
8654 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8655 	if (status)
8656 		return status;
8657 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8658 	/* if already overflow, not to print each time */
8659 	if (status == -ENOSPC &&
8660 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8661 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8662 
8663 	return status;
8664 }
8665 
8666 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8667 			    const unsigned char *addr)
8668 {
8669 	struct hclge_vport *vport = hclge_get_vport(handle);
8670 
8671 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8672 				     addr);
8673 }
8674 
8675 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8676 			    const unsigned char *addr)
8677 {
8678 	struct hclge_dev *hdev = vport->back;
8679 	struct hclge_mac_vlan_tbl_entry_cmd req;
8680 	enum hclge_cmd_status status;
8681 	struct hclge_desc desc[3];
8682 
8683 	/* mac addr check */
8684 	if (!is_multicast_ether_addr(addr)) {
8685 		dev_dbg(&hdev->pdev->dev,
8686 			"Remove mc mac err! invalid mac:%pM.\n",
8687 			 addr);
8688 		return -EINVAL;
8689 	}
8690 
8691 	memset(&req, 0, sizeof(req));
8692 	hclge_prepare_mac_addr(&req, addr, true);
8693 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8694 	if (!status) {
8695 		/* This mac addr exists, remove this handle's VFID for it */
8696 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8697 		if (status)
8698 			return status;
8699 
8700 		if (hclge_is_all_function_id_zero(desc))
8701 			/* All the vfids are zero, so delete this entry */
8702 			status = hclge_remove_mac_vlan_tbl(vport, &req);
8703 		else
8704 			/* Not all the vfids are zero, just update the vfid bitmap */
8705 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8706 	} else if (status == -ENOENT) {
8707 		status = 0;
8708 	}
8709 
8710 	return status;
8711 }
8712 
8713 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8714 				      struct list_head *list,
8715 				      int (*sync)(struct hclge_vport *,
8716 						  const unsigned char *))
8717 {
8718 	struct hclge_mac_node *mac_node, *tmp;
8719 	int ret;
8720 
8721 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8722 		ret = sync(vport, mac_node->mac_addr);
8723 		if (!ret) {
8724 			mac_node->state = HCLGE_MAC_ACTIVE;
8725 		} else {
8726 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8727 				&vport->state);
8728 			break;
8729 		}
8730 	}
8731 }
8732 
8733 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8734 					struct list_head *list,
8735 					int (*unsync)(struct hclge_vport *,
8736 						      const unsigned char *))
8737 {
8738 	struct hclge_mac_node *mac_node, *tmp;
8739 	int ret;
8740 
8741 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8742 		ret = unsync(vport, mac_node->mac_addr);
8743 		if (!ret || ret == -ENOENT) {
8744 			list_del(&mac_node->node);
8745 			kfree(mac_node);
8746 		} else {
8747 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8748 				&vport->state);
8749 			break;
8750 		}
8751 	}
8752 }
8753 
8754 static bool hclge_sync_from_add_list(struct list_head *add_list,
8755 				     struct list_head *mac_list)
8756 {
8757 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8758 	bool all_added = true;
8759 
8760 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8761 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8762 			all_added = false;
8763 
8764 		/* If the mac address from tmp_add_list is not in the
8765 		 * uc/mc_mac_list, a TO_DEL request was received while the
8766 		 * address was being added to the mac table. If the mac_node
8767 		 * state is ACTIVE, change it to TO_DEL so it will be removed
8768 		 * next time; otherwise it must be TO_ADD, which means the
8769 		 * address has not been added to the mac table yet,
8770 		 * so just remove the mac node.
8771 		 */
8772 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8773 		if (new_node) {
8774 			hclge_update_mac_node(new_node, mac_node->state);
8775 			list_del(&mac_node->node);
8776 			kfree(mac_node);
8777 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8778 			mac_node->state = HCLGE_MAC_TO_DEL;
8779 			list_del(&mac_node->node);
8780 			list_add_tail(&mac_node->node, mac_list);
8781 		} else {
8782 			list_del(&mac_node->node);
8783 			kfree(mac_node);
8784 		}
8785 	}
8786 
8787 	return all_added;
8788 }
8789 
8790 static void hclge_sync_from_del_list(struct list_head *del_list,
8791 				     struct list_head *mac_list)
8792 {
8793 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8794 
8795 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8796 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8797 		if (new_node) {
8798 			/* If the mac addr exists in the mac list, a new TO_ADD
8799 			 * request was received while the mac address was being
8800 			 * configured. Since the mac node state is TO_ADD and
8801 			 * the address is still in the hardware (the delete
8802 			 * failed), we just need to change the mac node state
8803 			 * to ACTIVE.
8804 			 */
8805 			new_node->state = HCLGE_MAC_ACTIVE;
8806 			list_del(&mac_node->node);
8807 			kfree(mac_node);
8808 		} else {
8809 			list_del(&mac_node->node);
8810 			list_add_tail(&mac_node->node, mac_list);
8811 		}
8812 	}
8813 }
8814 
8815 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8816 					enum HCLGE_MAC_ADDR_TYPE mac_type,
8817 					bool is_all_added)
8818 {
8819 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8820 		if (is_all_added)
8821 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8822 		else
8823 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8824 	} else {
8825 		if (is_all_added)
8826 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8827 		else
8828 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8829 	}
8830 }
8831 
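/* Push the pending uc/mc mac list changes of a vport to hardware in three
 * steps: snapshot TO_DEL/TO_ADD nodes into temporary lists under
 * mac_list_lock, apply the deletes and then the adds to hardware without the
 * lock held, and finally merge the results back into the vport list so any
 * failures are retried on the next sync.
 */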
8832 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8833 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
8834 {
8835 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8836 	struct list_head tmp_add_list, tmp_del_list;
8837 	struct list_head *list;
8838 	bool all_added;
8839 
8840 	INIT_LIST_HEAD(&tmp_add_list);
8841 	INIT_LIST_HEAD(&tmp_del_list);
8842 
8843 	/* move the mac addrs to tmp_add_list and tmp_del_list, so that
8844 	 * we can add/delete them outside the spin lock
8845 	 */
8846 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8847 		&vport->uc_mac_list : &vport->mc_mac_list;
8848 
8849 	spin_lock_bh(&vport->mac_list_lock);
8850 
8851 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8852 		switch (mac_node->state) {
8853 		case HCLGE_MAC_TO_DEL:
8854 			list_del(&mac_node->node);
8855 			list_add_tail(&mac_node->node, &tmp_del_list);
8856 			break;
8857 		case HCLGE_MAC_TO_ADD:
8858 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8859 			if (!new_node)
8860 				goto stop_traverse;
8861 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8862 			new_node->state = mac_node->state;
8863 			list_add_tail(&new_node->node, &tmp_add_list);
8864 			break;
8865 		default:
8866 			break;
8867 		}
8868 	}
8869 
8870 stop_traverse:
8871 	spin_unlock_bh(&vport->mac_list_lock);
8872 
8873 	/* delete first, in order to get max mac table space for adding */
8874 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8875 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8876 					    hclge_rm_uc_addr_common);
8877 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8878 					  hclge_add_uc_addr_common);
8879 	} else {
8880 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8881 					    hclge_rm_mc_addr_common);
8882 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8883 					  hclge_add_mc_addr_common);
8884 	}
8885 
8886 	/* if adding/deleting some mac addresses failed, move them back to
8887 	 * the mac_list and retry next time.
8888 	 */
8889 	spin_lock_bh(&vport->mac_list_lock);
8890 
8891 	hclge_sync_from_del_list(&tmp_del_list, list);
8892 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8893 
8894 	spin_unlock_bh(&vport->mac_list_lock);
8895 
8896 	hclge_update_overflow_flags(vport, mac_type, all_added);
8897 }
8898 
8899 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8900 {
8901 	struct hclge_dev *hdev = vport->back;
8902 
8903 	if (test_bit(vport->vport_id, hdev->vport_config_block))
8904 		return false;
8905 
8906 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8907 		return true;
8908 
8909 	return false;
8910 }
8911 
8912 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8913 {
8914 	int i;
8915 
8916 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8917 		struct hclge_vport *vport = &hdev->vport[i];
8918 
8919 		if (!hclge_need_sync_mac_table(vport))
8920 			continue;
8921 
8922 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8923 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8924 	}
8925 }
8926 
8927 static void hclge_build_del_list(struct list_head *list,
8928 				 bool is_del_list,
8929 				 struct list_head *tmp_del_list)
8930 {
8931 	struct hclge_mac_node *mac_cfg, *tmp;
8932 
8933 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8934 		switch (mac_cfg->state) {
8935 		case HCLGE_MAC_TO_DEL:
8936 		case HCLGE_MAC_ACTIVE:
8937 			list_del(&mac_cfg->node);
8938 			list_add_tail(&mac_cfg->node, tmp_del_list);
8939 			break;
8940 		case HCLGE_MAC_TO_ADD:
8941 			if (is_del_list) {
8942 				list_del(&mac_cfg->node);
8943 				kfree(mac_cfg);
8944 			}
8945 			break;
8946 		}
8947 	}
8948 }
8949 
8950 static void hclge_unsync_del_list(struct hclge_vport *vport,
8951 				  int (*unsync)(struct hclge_vport *vport,
8952 						const unsigned char *addr),
8953 				  bool is_del_list,
8954 				  struct list_head *tmp_del_list)
8955 {
8956 	struct hclge_mac_node *mac_cfg, *tmp;
8957 	int ret;
8958 
8959 	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8960 		ret = unsync(vport, mac_cfg->mac_addr);
8961 		if (!ret || ret == -ENOENT) {
8962 			/* clear all mac addrs from hardware, but keep these
8963 			 * mac addrs in the mac list, and restore them after
8964 			 * the vf reset has finished.
8965 			 */
8966 			if (!is_del_list &&
8967 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
8968 				mac_cfg->state = HCLGE_MAC_TO_ADD;
8969 			} else {
8970 				list_del(&mac_cfg->node);
8971 				kfree(mac_cfg);
8972 			}
8973 		} else if (is_del_list) {
8974 			mac_cfg->state = HCLGE_MAC_TO_DEL;
8975 		}
8976 	}
8977 }
8978 
8979 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8980 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
8981 {
8982 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8983 	struct hclge_dev *hdev = vport->back;
8984 	struct list_head tmp_del_list, *list;
8985 
8986 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8987 		list = &vport->uc_mac_list;
8988 		unsync = hclge_rm_uc_addr_common;
8989 	} else {
8990 		list = &vport->mc_mac_list;
8991 		unsync = hclge_rm_mc_addr_common;
8992 	}
8993 
8994 	INIT_LIST_HEAD(&tmp_del_list);
8995 
8996 	if (!is_del_list)
8997 		set_bit(vport->vport_id, hdev->vport_config_block);
8998 
8999 	spin_lock_bh(&vport->mac_list_lock);
9000 
9001 	hclge_build_del_list(list, is_del_list, &tmp_del_list);
9002 
9003 	spin_unlock_bh(&vport->mac_list_lock);
9004 
9005 	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9006 
9007 	spin_lock_bh(&vport->mac_list_lock);
9008 
9009 	hclge_sync_from_del_list(&tmp_del_list, list);
9010 
9011 	spin_unlock_bh(&vport->mac_list_lock);
9012 }
9013 
9014 /* remove all mac addresses when uninitializing */
9015 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9016 					enum HCLGE_MAC_ADDR_TYPE mac_type)
9017 {
9018 	struct hclge_mac_node *mac_node, *tmp;
9019 	struct hclge_dev *hdev = vport->back;
9020 	struct list_head tmp_del_list, *list;
9021 
9022 	INIT_LIST_HEAD(&tmp_del_list);
9023 
9024 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9025 		&vport->uc_mac_list : &vport->mc_mac_list;
9026 
9027 	spin_lock_bh(&vport->mac_list_lock);
9028 
9029 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9030 		switch (mac_node->state) {
9031 		case HCLGE_MAC_TO_DEL:
9032 		case HCLGE_MAC_ACTIVE:
9033 			list_del(&mac_node->node);
9034 			list_add_tail(&mac_node->node, &tmp_del_list);
9035 			break;
9036 		case HCLGE_MAC_TO_ADD:
9037 			list_del(&mac_node->node);
9038 			kfree(mac_node);
9039 			break;
9040 		}
9041 	}
9042 
9043 	spin_unlock_bh(&vport->mac_list_lock);
9044 
9045 	if (mac_type == HCLGE_MAC_ADDR_UC)
9046 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9047 					    hclge_rm_uc_addr_common);
9048 	else
9049 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9050 					    hclge_rm_mc_addr_common);
9051 
9052 	if (!list_empty(&tmp_del_list))
9053 		dev_warn(&hdev->pdev->dev,
9054 			 "uninit %s mac list for vport %u not completely.\n",
9055 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9056 			 vport->vport_id);
9057 
9058 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9059 		list_del(&mac_node->node);
9060 		kfree(mac_node);
9061 	}
9062 }
9063 
9064 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9065 {
9066 	struct hclge_vport *vport;
9067 	int i;
9068 
9069 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9070 		vport = &hdev->vport[i];
9071 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9072 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9073 	}
9074 }
9075 
9076 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9077 					      u16 cmdq_resp, u8 resp_code)
9078 {
9079 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
9080 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
9081 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
9082 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
9083 
9084 	int return_status;
9085 
9086 	if (cmdq_resp) {
9087 		dev_err(&hdev->pdev->dev,
9088 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9089 			cmdq_resp);
9090 		return -EIO;
9091 	}
9092 
9093 	switch (resp_code) {
9094 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
9095 	case HCLGE_ETHERTYPE_ALREADY_ADD:
9096 		return_status = 0;
9097 		break;
9098 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9099 		dev_err(&hdev->pdev->dev,
9100 			"add mac ethertype failed for manager table overflow.\n");
9101 		return_status = -EIO;
9102 		break;
9103 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
9104 		dev_err(&hdev->pdev->dev,
9105 			"add mac ethertype failed for key conflict.\n");
9106 		return_status = -EIO;
9107 		break;
9108 	default:
9109 		dev_err(&hdev->pdev->dev,
9110 			"add mac ethertype failed for undefined, code=%u.\n",
9111 			resp_code);
9112 		return_status = -EIO;
9113 	}
9114 
9115 	return return_status;
9116 }
9117 
9118 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9119 				     u8 *mac_addr)
9120 {
9121 	struct hclge_mac_vlan_tbl_entry_cmd req;
9122 	struct hclge_dev *hdev = vport->back;
9123 	struct hclge_desc desc;
9124 	u16 egress_port = 0;
9125 	int i;
9126 
9127 	if (is_zero_ether_addr(mac_addr))
9128 		return false;
9129 
9130 	memset(&req, 0, sizeof(req));
9131 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9132 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9133 	req.egress_port = cpu_to_le16(egress_port);
9134 	hclge_prepare_mac_addr(&req, mac_addr, false);
9135 
9136 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9137 		return true;
9138 
9139 	vf_idx += HCLGE_VF_VPORT_START_NUM;
9140 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9141 		if (i != vf_idx &&
9142 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9143 			return true;
9144 
9145 	return false;
9146 }
9147 
9148 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9149 			    u8 *mac_addr)
9150 {
9151 	struct hclge_vport *vport = hclge_get_vport(handle);
9152 	struct hclge_dev *hdev = vport->back;
9153 
9154 	vport = hclge_get_vf_vport(hdev, vf);
9155 	if (!vport)
9156 		return -EINVAL;
9157 
9158 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9159 		dev_info(&hdev->pdev->dev,
9160 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
9161 			 mac_addr);
9162 		return 0;
9163 	}
9164 
9165 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9166 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9167 			mac_addr);
9168 		return -EEXIST;
9169 	}
9170 
9171 	ether_addr_copy(vport->vf_info.mac, mac_addr);
9172 
9173 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9174 		dev_info(&hdev->pdev->dev,
9175 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9176 			 vf, mac_addr);
9177 		return hclge_inform_reset_assert_to_vf(vport);
9178 	}
9179 
9180 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9181 		 vf, mac_addr);
9182 	return 0;
9183 }
9184 
9185 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9186 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
9187 {
9188 	struct hclge_desc desc;
9189 	u8 resp_code;
9190 	u16 retval;
9191 	int ret;
9192 
9193 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9194 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9195 
9196 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9197 	if (ret) {
9198 		dev_err(&hdev->pdev->dev,
9199 			"add mac ethertype failed for cmd_send, ret =%d.\n",
9200 			ret);
9201 		return ret;
9202 	}
9203 
9204 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9205 	retval = le16_to_cpu(desc.retval);
9206 
9207 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9208 }
9209 
9210 static int init_mgr_tbl(struct hclge_dev *hdev)
9211 {
9212 	int ret;
9213 	int i;
9214 
9215 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9216 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9217 		if (ret) {
9218 			dev_err(&hdev->pdev->dev,
9219 				"add mac ethertype failed, ret =%d.\n",
9220 				ret);
9221 			return ret;
9222 		}
9223 	}
9224 
9225 	return 0;
9226 }
9227 
9228 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9229 {
9230 	struct hclge_vport *vport = hclge_get_vport(handle);
9231 	struct hclge_dev *hdev = vport->back;
9232 
9233 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
9234 }
9235 
9236 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9237 				       const u8 *old_addr, const u8 *new_addr)
9238 {
9239 	struct list_head *list = &vport->uc_mac_list;
9240 	struct hclge_mac_node *old_node, *new_node;
9241 
9242 	new_node = hclge_find_mac_node(list, new_addr);
9243 	if (!new_node) {
9244 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9245 		if (!new_node)
9246 			return -ENOMEM;
9247 
9248 		new_node->state = HCLGE_MAC_TO_ADD;
9249 		ether_addr_copy(new_node->mac_addr, new_addr);
9250 		list_add(&new_node->node, list);
9251 	} else {
9252 		if (new_node->state == HCLGE_MAC_TO_DEL)
9253 			new_node->state = HCLGE_MAC_ACTIVE;
9254 
9255 		/* make sure the new addr is at the list head, so the dev
9256 		 * addr is not left out of the mac table due to the umv space
9257 		 * limitation after a global/imp reset, which clears the mac
9258 		 * table in hardware.
9259 		 */
9260 		list_move(&new_node->node, list);
9261 	}
9262 
9263 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9264 		old_node = hclge_find_mac_node(list, old_addr);
9265 		if (old_node) {
9266 			if (old_node->state == HCLGE_MAC_TO_ADD) {
9267 				list_del(&old_node->node);
9268 				kfree(old_node);
9269 			} else {
9270 				old_node->state = HCLGE_MAC_TO_DEL;
9271 			}
9272 		}
9273 	}
9274 
9275 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9276 
9277 	return 0;
9278 }
9279 
9280 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9281 			      bool is_first)
9282 {
9283 	const unsigned char *new_addr = (const unsigned char *)p;
9284 	struct hclge_vport *vport = hclge_get_vport(handle);
9285 	struct hclge_dev *hdev = vport->back;
9286 	unsigned char *old_addr = NULL;
9287 	int ret;
9288 
9289 	/* mac addr check */
9290 	if (is_zero_ether_addr(new_addr) ||
9291 	    is_broadcast_ether_addr(new_addr) ||
9292 	    is_multicast_ether_addr(new_addr)) {
9293 		dev_err(&hdev->pdev->dev,
9294 			"change uc mac err! invalid mac: %pM.\n",
9295 			 new_addr);
9296 		return -EINVAL;
9297 	}
9298 
9299 	ret = hclge_pause_addr_cfg(hdev, new_addr);
9300 	if (ret) {
9301 		dev_err(&hdev->pdev->dev,
9302 			"failed to configure mac pause address, ret = %d\n",
9303 			ret);
9304 		return ret;
9305 	}
9306 
9307 	if (!is_first)
9308 		old_addr = hdev->hw.mac.mac_addr;
9309 
9310 	spin_lock_bh(&vport->mac_list_lock);
9311 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9312 	if (ret) {
9313 		dev_err(&hdev->pdev->dev,
9314 			"failed to change the mac addr:%pM, ret = %d\n",
9315 			new_addr, ret);
9316 		spin_unlock_bh(&vport->mac_list_lock);
9317 
9318 		if (!is_first)
9319 			hclge_pause_addr_cfg(hdev, old_addr);
9320 
9321 		return ret;
9322 	}
9323 	/* we must update the dev addr while holding the spin lock, to prevent
9324 	 * it from being removed by the set_rx_mode path.
9325 	 */
9326 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9327 	spin_unlock_bh(&vport->mac_list_lock);
9328 
9329 	hclge_task_schedule(hdev, 0);
9330 
9331 	return 0;
9332 }
9333 
9334 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9335 {
9336 	struct mii_ioctl_data *data = if_mii(ifr);
9337 
9338 	if (!hnae3_dev_phy_imp_supported(hdev))
9339 		return -EOPNOTSUPP;
9340 
9341 	switch (cmd) {
9342 	case SIOCGMIIPHY:
9343 		data->phy_id = hdev->hw.mac.phy_addr;
9344 		/* this command reads phy id and register at the same time */
9345 		fallthrough;
9346 	case SIOCGMIIREG:
9347 		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9348 		return 0;
9349 
9350 	case SIOCSMIIREG:
9351 		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9352 	default:
9353 		return -EOPNOTSUPP;
9354 	}
9355 }
9356 
9357 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9358 			  int cmd)
9359 {
9360 	struct hclge_vport *vport = hclge_get_vport(handle);
9361 	struct hclge_dev *hdev = vport->back;
9362 
9363 	if (!hdev->hw.mac.phydev)
9364 		return hclge_mii_ioctl(hdev, ifr, cmd);
9365 
9366 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9367 }
9368 
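/* Enable or disable the filter-entry bits in fe_type for the given vlan
 * filter type (VF or port) of vf_id, using a read-modify-write of the current
 * vlan filter configuration so the other enable bits are preserved.
 */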
9369 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9370 				      u8 fe_type, bool filter_en, u8 vf_id)
9371 {
9372 	struct hclge_vlan_filter_ctrl_cmd *req;
9373 	struct hclge_desc desc;
9374 	int ret;
9375 
9376 	/* read current vlan filter parameter */
9377 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9378 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9379 	req->vlan_type = vlan_type;
9380 	req->vf_id = vf_id;
9381 
9382 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9383 	if (ret) {
9384 		dev_err(&hdev->pdev->dev,
9385 			"failed to get vlan filter config, ret = %d.\n", ret);
9386 		return ret;
9387 	}
9388 
9389 	/* modify and write new config parameter */
9390 	hclge_cmd_reuse_desc(&desc, false);
9391 	req->vlan_fe = filter_en ?
9392 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9393 
9394 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9395 	if (ret)
9396 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9397 			ret);
9398 
9399 	return ret;
9400 }
9401 
9402 #define HCLGE_FILTER_TYPE_VF		0
9403 #define HCLGE_FILTER_TYPE_PORT		1
9404 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
9405 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
9406 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
9407 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
9408 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
9409 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
9410 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
9411 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
9412 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
9413 
9414 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9415 {
9416 	struct hclge_vport *vport = hclge_get_vport(handle);
9417 	struct hclge_dev *hdev = vport->back;
9418 
9419 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9420 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9421 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
9422 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9423 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
9424 	} else {
9425 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9426 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
9427 					   0);
9428 	}
9429 	if (enable)
9430 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
9431 	else
9432 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
9433 }
9434 
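/* Build and send the two-descriptor VLAN_FILTER_VF_CFG command: the vlan id
 * and add/kill flag go in the first descriptor, while the target VF is
 * selected by a single bit in a bitmap that spans both descriptors
 * (vf_byte_off decides which descriptor holds the bit).
 */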
9435 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9436 					bool is_kill, u16 vlan,
9437 					struct hclge_desc *desc)
9438 {
9439 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
9440 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
9441 	u8 vf_byte_val;
9442 	u8 vf_byte_off;
9443 	int ret;
9444 
9445 	hclge_cmd_setup_basic_desc(&desc[0],
9446 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9447 	hclge_cmd_setup_basic_desc(&desc[1],
9448 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9449 
9450 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9451 
9452 	vf_byte_off = vfid / 8;
9453 	vf_byte_val = 1 << (vfid % 8);
9454 
9455 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9456 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9457 
9458 	req0->vlan_id  = cpu_to_le16(vlan);
9459 	req0->vlan_cfg = is_kill;
9460 
9461 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9462 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9463 	else
9464 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9465 
9466 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
9467 	if (ret) {
9468 		dev_err(&hdev->pdev->dev,
9469 			"Send vf vlan command fail, ret =%d.\n",
9470 			ret);
9471 		return ret;
9472 	}
9473 
9474 	return 0;
9475 }
9476 
9477 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9478 					  bool is_kill, struct hclge_desc *desc)
9479 {
9480 	struct hclge_vlan_filter_vf_cfg_cmd *req;
9481 
9482 	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9483 
9484 	if (!is_kill) {
9485 #define HCLGE_VF_VLAN_NO_ENTRY	2
9486 		if (!req->resp_code || req->resp_code == 1)
9487 			return 0;
9488 
9489 		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9490 			set_bit(vfid, hdev->vf_vlan_full);
9491 			dev_warn(&hdev->pdev->dev,
9492 				 "vf vlan table is full, vf vlan filter is disabled\n");
9493 			return 0;
9494 		}
9495 
9496 		dev_err(&hdev->pdev->dev,
9497 			"Add vf vlan filter fail, ret =%u.\n",
9498 			req->resp_code);
9499 	} else {
9500 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
9501 		if (!req->resp_code)
9502 			return 0;
9503 
9504 		/* vf vlan filter is disabled when vf vlan table is full,
9505 		 * so new vlan ids will not be added to the vf vlan table.
9506 		 * Just return 0 without a warning, to avoid a flood of
9507 		 * verbose logs on unload.
9508 		 */
9509 		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9510 			return 0;
9511 
9512 		dev_err(&hdev->pdev->dev,
9513 			"Kill vf vlan filter fail, ret =%u.\n",
9514 			req->resp_code);
9515 	}
9516 
9517 	return -EIO;
9518 }
9519 
9520 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9521 				    bool is_kill, u16 vlan)
9522 {
9523 	struct hclge_vport *vport = &hdev->vport[vfid];
9524 	struct hclge_desc desc[2];
9525 	int ret;
9526 
9527 	/* if the vf vlan table is full, firmware disables the vf vlan filter,
9528 	 * so it is neither possible nor necessary to add a new vlan id to it.
9529 	 * If spoof check is enabled and the vf vlan table is full, a new vlan
9530 	 * must not be added, because tx packets with that vlan id would be dropped.
9531 	 */
9532 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9533 		if (vport->vf_info.spoofchk && vlan) {
9534 			dev_err(&hdev->pdev->dev,
9535 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
9536 			return -EPERM;
9537 		}
9538 		return 0;
9539 	}
9540 
9541 	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9542 	if (ret)
9543 		return ret;
9544 
9545 	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9546 }
9547 
9548 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9549 				      u16 vlan_id, bool is_kill)
9550 {
9551 	struct hclge_vlan_filter_pf_cfg_cmd *req;
9552 	struct hclge_desc desc;
9553 	u8 vlan_offset_byte_val;
9554 	u8 vlan_offset_byte;
9555 	u8 vlan_offset_160;
9556 	int ret;
9557 
9558 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9559 
9560 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9561 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9562 			   HCLGE_VLAN_BYTE_SIZE;
9563 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9564 
9565 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9566 	req->vlan_offset = vlan_offset_160;
9567 	req->vlan_cfg = is_kill;
9568 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9569 
9570 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9571 	if (ret)
9572 		dev_err(&hdev->pdev->dev,
9573 			"port vlan command, send fail, ret =%d.\n", ret);
9574 	return ret;
9575 }
9576 
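/* Update the vlan filter for one vport. The per-VF filter is always updated,
 * but hdev->vlan_table[vlan_id] tracks which vports use each vlan so that the
 * port-level filter is only programmed when the first vport adds the vlan or
 * the last vport removes it.
 */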
9577 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9578 				    u16 vport_id, u16 vlan_id,
9579 				    bool is_kill)
9580 {
9581 	u16 vport_idx, vport_num = 0;
9582 	int ret;
9583 
9584 	if (is_kill && !vlan_id)
9585 		return 0;
9586 
9587 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9588 	if (ret) {
9589 		dev_err(&hdev->pdev->dev,
9590 			"Set %u vport vlan filter config fail, ret =%d.\n",
9591 			vport_id, ret);
9592 		return ret;
9593 	}
9594 
9595 	/* vlan 0 may be added twice when 8021q module is enabled */
9596 	if (!is_kill && !vlan_id &&
9597 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
9598 		return 0;
9599 
9600 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9601 		dev_err(&hdev->pdev->dev,
9602 			"Add port vlan failed, vport %u is already in vlan %u\n",
9603 			vport_id, vlan_id);
9604 		return -EINVAL;
9605 	}
9606 
9607 	if (is_kill &&
9608 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9609 		dev_err(&hdev->pdev->dev,
9610 			"Delete port vlan failed, vport %u is not in vlan %u\n",
9611 			vport_id, vlan_id);
9612 		return -EINVAL;
9613 	}
9614 
9615 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9616 		vport_num++;
9617 
9618 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9619 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9620 						 is_kill);
9621 
9622 	return ret;
9623 }
9624 
9625 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9626 {
9627 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9628 	struct hclge_vport_vtag_tx_cfg_cmd *req;
9629 	struct hclge_dev *hdev = vport->back;
9630 	struct hclge_desc desc;
9631 	u16 bmap_index;
9632 	int status;
9633 
9634 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9635 
9636 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9637 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9638 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9639 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9640 		      vcfg->accept_tag1 ? 1 : 0);
9641 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9642 		      vcfg->accept_untag1 ? 1 : 0);
9643 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9644 		      vcfg->accept_tag2 ? 1 : 0);
9645 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9646 		      vcfg->accept_untag2 ? 1 : 0);
9647 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9648 		      vcfg->insert_tag1_en ? 1 : 0);
9649 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9650 		      vcfg->insert_tag2_en ? 1 : 0);
9651 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9652 		      vcfg->tag_shift_mode_en ? 1 : 0);
9653 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9654 
9655 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9656 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9657 			HCLGE_VF_NUM_PER_BYTE;
9658 	req->vf_bitmap[bmap_index] =
9659 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9660 
9661 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9662 	if (status)
9663 		dev_err(&hdev->pdev->dev,
9664 			"Send port txvlan cfg command fail, ret =%d\n",
9665 			status);
9666 
9667 	return status;
9668 }
9669 
9670 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9671 {
9672 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9673 	struct hclge_vport_vtag_rx_cfg_cmd *req;
9674 	struct hclge_dev *hdev = vport->back;
9675 	struct hclge_desc desc;
9676 	u16 bmap_index;
9677 	int status;
9678 
9679 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9680 
9681 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9682 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9683 		      vcfg->strip_tag1_en ? 1 : 0);
9684 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9685 		      vcfg->strip_tag2_en ? 1 : 0);
9686 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9687 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
9688 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9689 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
9690 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9691 		      vcfg->strip_tag1_discard_en ? 1 : 0);
9692 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9693 		      vcfg->strip_tag2_discard_en ? 1 : 0);
9694 
9695 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9696 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9697 			HCLGE_VF_NUM_PER_BYTE;
9698 	req->vf_bitmap[bmap_index] =
9699 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9700 
9701 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9702 	if (status)
9703 		dev_err(&hdev->pdev->dev,
9704 			"Send port rxvlan cfg command fail, ret =%d\n",
9705 			status);
9706 
9707 	return status;
9708 }
9709 
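/* Derive the tx insert and rx strip settings from the port based vlan state:
 * with it disabled, no default tag1 is inserted and tag2 stripping follows
 * rx_vlan_offload_en; with it enabled, the port vlan is inserted as tag1 on
 * tx, tag1 stripping follows rx_vlan_offload_en, and tag2 is stripped and
 * discarded.
 */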
9710 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9711 				  u16 port_base_vlan_state,
9712 				  u16 vlan_tag)
9713 {
9714 	int ret;
9715 
9716 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9717 		vport->txvlan_cfg.accept_tag1 = true;
9718 		vport->txvlan_cfg.insert_tag1_en = false;
9719 		vport->txvlan_cfg.default_tag1 = 0;
9720 	} else {
9721 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9722 
9723 		vport->txvlan_cfg.accept_tag1 =
9724 			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9725 		vport->txvlan_cfg.insert_tag1_en = true;
9726 		vport->txvlan_cfg.default_tag1 = vlan_tag;
9727 	}
9728 
9729 	vport->txvlan_cfg.accept_untag1 = true;
9730 
9731 	/* accept_tag2 and accept_untag2 are not supported on
9732 	 * pdev revision 0x20; newer revisions support them, and
9733 	 * these two fields cannot be configured by the user.
9734 	 */
9735 	vport->txvlan_cfg.accept_tag2 = true;
9736 	vport->txvlan_cfg.accept_untag2 = true;
9737 	vport->txvlan_cfg.insert_tag2_en = false;
9738 	vport->txvlan_cfg.default_tag2 = 0;
9739 	vport->txvlan_cfg.tag_shift_mode_en = true;
9740 
9741 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9742 		vport->rxvlan_cfg.strip_tag1_en = false;
9743 		vport->rxvlan_cfg.strip_tag2_en =
9744 				vport->rxvlan_cfg.rx_vlan_offload_en;
9745 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
9746 	} else {
9747 		vport->rxvlan_cfg.strip_tag1_en =
9748 				vport->rxvlan_cfg.rx_vlan_offload_en;
9749 		vport->rxvlan_cfg.strip_tag2_en = true;
9750 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
9751 	}
9752 
9753 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
9754 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9755 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9756 
9757 	ret = hclge_set_vlan_tx_offload_cfg(vport);
9758 	if (ret)
9759 		return ret;
9760 
9761 	return hclge_set_vlan_rx_offload_cfg(vport);
9762 }
9763 
9764 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9765 {
9766 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9767 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9768 	struct hclge_desc desc;
9769 	int status;
9770 
9771 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9772 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9773 	rx_req->ot_fst_vlan_type =
9774 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9775 	rx_req->ot_sec_vlan_type =
9776 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9777 	rx_req->in_fst_vlan_type =
9778 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9779 	rx_req->in_sec_vlan_type =
9780 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9781 
9782 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9783 	if (status) {
9784 		dev_err(&hdev->pdev->dev,
9785 			"Send rxvlan protocol type command fail, ret =%d\n",
9786 			status);
9787 		return status;
9788 	}
9789 
9790 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9791 
9792 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9793 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9794 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9795 
9796 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9797 	if (status)
9798 		dev_err(&hdev->pdev->dev,
9799 			"Send txvlan protocol type command fail, ret =%d\n",
9800 			status);
9801 
9802 	return status;
9803 }
9804 
9805 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9806 {
9807 #define HCLGE_DEF_VLAN_TYPE		0x8100
9808 
9809 	struct hnae3_handle *handle = &hdev->vport[0].nic;
9810 	struct hclge_vport *vport;
9811 	int ret;
9812 	int i;
9813 
9814 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9815 		/* for revision 0x21, vf vlan filter is per function */
9816 		for (i = 0; i < hdev->num_alloc_vport; i++) {
9817 			vport = &hdev->vport[i];
9818 			ret = hclge_set_vlan_filter_ctrl(hdev,
9819 							 HCLGE_FILTER_TYPE_VF,
9820 							 HCLGE_FILTER_FE_EGRESS,
9821 							 true,
9822 							 vport->vport_id);
9823 			if (ret)
9824 				return ret;
9825 		}
9826 
9827 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9828 						 HCLGE_FILTER_FE_INGRESS, true,
9829 						 0);
9830 		if (ret)
9831 			return ret;
9832 	} else {
9833 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9834 						 HCLGE_FILTER_FE_EGRESS_V1_B,
9835 						 true, 0);
9836 		if (ret)
9837 			return ret;
9838 	}
9839 
9840 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
9841 
9842 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9843 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9844 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9845 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9846 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9847 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9848 
9849 	ret = hclge_set_vlan_protocol_type(hdev);
9850 	if (ret)
9851 		return ret;
9852 
9853 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9854 		u16 vlan_tag;
9855 
9856 		vport = &hdev->vport[i];
9857 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9858 
9859 		ret = hclge_vlan_offload_cfg(vport,
9860 					     vport->port_base_vlan_cfg.state,
9861 					     vlan_tag);
9862 		if (ret)
9863 			return ret;
9864 	}
9865 
9866 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9867 }
9868 
9869 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9870 				       bool writen_to_tbl)
9871 {
9872 	struct hclge_vport_vlan_cfg *vlan;
9873 
9874 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9875 	if (!vlan)
9876 		return;
9877 
9878 	vlan->hd_tbl_status = writen_to_tbl;
9879 	vlan->vlan_id = vlan_id;
9880 
9881 	list_add_tail(&vlan->node, &vport->vlan_list);
9882 }
9883 
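/* Write every entry in the vport's vlan list that is not yet present in the
 * hardware vlan filter (hd_tbl_status is false) into hardware, and mark the
 * whole list as synced.
 */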
9884 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9885 {
9886 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9887 	struct hclge_dev *hdev = vport->back;
9888 	int ret;
9889 
9890 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9891 		if (!vlan->hd_tbl_status) {
9892 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9893 						       vport->vport_id,
9894 						       vlan->vlan_id, false);
9895 			if (ret) {
9896 				dev_err(&hdev->pdev->dev,
9897 					"restore vport vlan list failed, ret=%d\n",
9898 					ret);
9899 				return ret;
9900 			}
9901 		}
9902 		vlan->hd_tbl_status = true;
9903 	}
9904 
9905 	return 0;
9906 }
9907 
9908 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9909 				      bool is_write_tbl)
9910 {
9911 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9912 	struct hclge_dev *hdev = vport->back;
9913 
9914 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9915 		if (vlan->vlan_id == vlan_id) {
9916 			if (is_write_tbl && vlan->hd_tbl_status)
9917 				hclge_set_vlan_filter_hw(hdev,
9918 							 htons(ETH_P_8021Q),
9919 							 vport->vport_id,
9920 							 vlan_id,
9921 							 true);
9922 
9923 			list_del(&vlan->node);
9924 			kfree(vlan);
9925 			break;
9926 		}
9927 	}
9928 }
9929 
9930 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9931 {
9932 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9933 	struct hclge_dev *hdev = vport->back;
9934 
9935 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9936 		if (vlan->hd_tbl_status)
9937 			hclge_set_vlan_filter_hw(hdev,
9938 						 htons(ETH_P_8021Q),
9939 						 vport->vport_id,
9940 						 vlan->vlan_id,
9941 						 true);
9942 
9943 		vlan->hd_tbl_status = false;
9944 		if (is_del_list) {
9945 			list_del(&vlan->node);
9946 			kfree(vlan);
9947 		}
9948 	}
9949 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
9950 }
9951 
9952 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9953 {
9954 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9955 	struct hclge_vport *vport;
9956 	int i;
9957 
9958 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9959 		vport = &hdev->vport[i];
9960 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9961 			list_del(&vlan->node);
9962 			kfree(vlan);
9963 		}
9964 	}
9965 }
9966 
9967 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9968 {
9969 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9970 	struct hclge_dev *hdev = vport->back;
9971 	u16 vlan_proto;
9972 	u16 vlan_id;
9973 	u16 state;
9974 	int ret;
9975 
9976 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9977 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9978 	state = vport->port_base_vlan_cfg.state;
9979 
9980 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9981 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9982 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9983 					 vport->vport_id, vlan_id,
9984 					 false);
9985 		return;
9986 	}
9987 
9988 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9989 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9990 					       vport->vport_id,
9991 					       vlan->vlan_id, false);
9992 		if (ret)
9993 			break;
9994 		vlan->hd_tbl_status = true;
9995 	}
9996 }
9997 
9998 /* For global reset and imp reset, hardware will clear the mac table,
9999  * so we change the mac address state from ACTIVE to TO_ADD, then they
10000  * can be restored in the service task after the reset completes. Furthermore,
10001  * the mac addresses with state TO_DEL or DEL_FAIL do not need to be
10002  * restored after reset, so just remove these mac nodes from mac_list.
10003  */
10004 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10005 {
10006 	struct hclge_mac_node *mac_node, *tmp;
10007 
10008 	list_for_each_entry_safe(mac_node, tmp, list, node) {
10009 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
10010 			mac_node->state = HCLGE_MAC_TO_ADD;
10011 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10012 			list_del(&mac_node->node);
10013 			kfree(mac_node);
10014 		}
10015 	}
10016 }
10017 
10018 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10019 {
10020 	spin_lock_bh(&vport->mac_list_lock);
10021 
10022 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10023 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10024 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10025 
10026 	spin_unlock_bh(&vport->mac_list_lock);
10027 }
10028 
10029 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10030 {
10031 	struct hclge_vport *vport = &hdev->vport[0];
10032 	struct hnae3_handle *handle = &vport->nic;
10033 
10034 	hclge_restore_mac_table_common(vport);
10035 	hclge_restore_vport_vlan_table(vport);
10036 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
10037 	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10038 	hclge_restore_fd_entries(handle);
10039 }
10040 
10041 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10042 {
10043 	struct hclge_vport *vport = hclge_get_vport(handle);
10044 
10045 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10046 		vport->rxvlan_cfg.strip_tag1_en = false;
10047 		vport->rxvlan_cfg.strip_tag2_en = enable;
10048 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
10049 	} else {
10050 		vport->rxvlan_cfg.strip_tag1_en = enable;
10051 		vport->rxvlan_cfg.strip_tag2_en = true;
10052 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
10053 	}
10054 
10055 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
10056 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10057 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10058 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10059 
10060 	return hclge_set_vlan_rx_offload_cfg(vport);
10061 }
10062 
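/* When enabling port based vlan, drop all software vlan list entries from the
 * hardware filter and install only the new port based vlan; when disabling it,
 * remove the old port based vlan from hardware and restore the vlan list.
 */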
10063 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10064 					    u16 port_base_vlan_state,
10065 					    struct hclge_vlan_info *new_info,
10066 					    struct hclge_vlan_info *old_info)
10067 {
10068 	struct hclge_dev *hdev = vport->back;
10069 	int ret;
10070 
10071 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10072 		hclge_rm_vport_all_vlan_table(vport, false);
10073 		return hclge_set_vlan_filter_hw(hdev,
10074 						 htons(new_info->vlan_proto),
10075 						 vport->vport_id,
10076 						 new_info->vlan_tag,
10077 						 false);
10078 	}
10079 
10080 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10081 				       vport->vport_id, old_info->vlan_tag,
10082 				       true);
10083 	if (ret)
10084 		return ret;
10085 
10086 	return hclge_add_vport_all_vlan_table(vport);
10087 }
10088 
10089 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10090 				    struct hclge_vlan_info *vlan_info)
10091 {
10092 	struct hnae3_handle *nic = &vport->nic;
10093 	struct hclge_vlan_info *old_vlan_info;
10094 	struct hclge_dev *hdev = vport->back;
10095 	int ret;
10096 
10097 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10098 
10099 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
10100 	if (ret)
10101 		return ret;
10102 
10103 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10104 		/* add new VLAN tag */
10105 		ret = hclge_set_vlan_filter_hw(hdev,
10106 					       htons(vlan_info->vlan_proto),
10107 					       vport->vport_id,
10108 					       vlan_info->vlan_tag,
10109 					       false);
10110 		if (ret)
10111 			return ret;
10112 
10113 		/* remove old VLAN tag */
10114 		ret = hclge_set_vlan_filter_hw(hdev,
10115 					       htons(old_vlan_info->vlan_proto),
10116 					       vport->vport_id,
10117 					       old_vlan_info->vlan_tag,
10118 					       true);
10119 		if (ret)
10120 			return ret;
10121 
10122 		goto update;
10123 	}
10124 
10125 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10126 					       old_vlan_info);
10127 	if (ret)
10128 		return ret;
10129 
10130 	/* update state only when disabling/enabling port based VLAN */
10131 	vport->port_base_vlan_cfg.state = state;
10132 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10133 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10134 	else
10135 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10136 
10137 update:
10138 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
10139 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
10140 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
10141 
10142 	return 0;
10143 }
10144 
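/* Map the requested vlan against the current port based vlan state to decide
 * whether to enable, disable, modify or leave the port based vlan unchanged.
 */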
10145 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10146 					  enum hnae3_port_base_vlan_state state,
10147 					  u16 vlan)
10148 {
10149 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10150 		if (!vlan)
10151 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10152 		else
10153 			return HNAE3_PORT_BASE_VLAN_ENABLE;
10154 	} else {
10155 		if (!vlan)
10156 			return HNAE3_PORT_BASE_VLAN_DISABLE;
10157 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
10158 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10159 		else
10160 			return HNAE3_PORT_BASE_VLAN_MODIFY;
10161 	}
10162 }
10163 
10164 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10165 				    u16 vlan, u8 qos, __be16 proto)
10166 {
10167 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10168 	struct hclge_vport *vport = hclge_get_vport(handle);
10169 	struct hclge_dev *hdev = vport->back;
10170 	struct hclge_vlan_info vlan_info;
10171 	u16 state;
10172 	int ret;
10173 
10174 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10175 		return -EOPNOTSUPP;
10176 
10177 	vport = hclge_get_vf_vport(hdev, vfid);
10178 	if (!vport)
10179 		return -EINVAL;
10180 
10181 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
10182 	if (vlan > VLAN_N_VID - 1 || qos > 7)
10183 		return -EINVAL;
10184 	if (proto != htons(ETH_P_8021Q))
10185 		return -EPROTONOSUPPORT;
10186 
10187 	state = hclge_get_port_base_vlan_state(vport,
10188 					       vport->port_base_vlan_cfg.state,
10189 					       vlan);
10190 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10191 		return 0;
10192 
10193 	vlan_info.vlan_tag = vlan;
10194 	vlan_info.qos = qos;
10195 	vlan_info.vlan_proto = ntohs(proto);
10196 
10197 	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10198 	if (ret) {
10199 		dev_err(&hdev->pdev->dev,
10200 			"failed to update port base vlan for vf %d, ret = %d\n",
10201 			vfid, ret);
10202 		return ret;
10203 	}
10204 
10205 	/* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10206 	 * VLAN state.
10207 	 */
10208 	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10209 	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10210 		hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10211 						  vport->vport_id, state,
10212 						  vlan, qos,
10213 						  ntohs(proto));
10214 
10215 	return 0;
10216 }
10217 
10218 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10219 {
10220 	struct hclge_vlan_info *vlan_info;
10221 	struct hclge_vport *vport;
10222 	int ret;
10223 	int vf;
10224 
10225 	/* clear port base vlan for all vf */
10226 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10227 		vport = &hdev->vport[vf];
10228 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10229 
10230 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10231 					       vport->vport_id,
10232 					       vlan_info->vlan_tag, true);
10233 		if (ret)
10234 			dev_err(&hdev->pdev->dev,
10235 				"failed to clear vf vlan for vf%d, ret = %d\n",
10236 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10237 	}
10238 }
10239 
10240 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10241 			  u16 vlan_id, bool is_kill)
10242 {
10243 	struct hclge_vport *vport = hclge_get_vport(handle);
10244 	struct hclge_dev *hdev = vport->back;
10245 	bool writen_to_tbl = false;
10246 	int ret = 0;
10247 
10248 	/* When the device is resetting or the reset has failed, firmware is
10249 	 * unable to handle the mailbox. Just record the vlan id, and remove it
10250 	 * after the reset has finished.
10251 	 */
10252 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10253 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10254 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10255 		return -EBUSY;
10256 	}
10257 
10258 	/* When port based vlan is enabled, we use the port based vlan as the
10259 	 * vlan filter entry. In this case, we don't update the vlan filter table
10260 	 * when the user adds a new vlan or removes an existing vlan; we just
10261 	 * update the vport vlan list. The vlan ids in the vlan list will not be
10262 	 * written to the vlan filter table until port based vlan is disabled.
10263 	 */
10264 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10265 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10266 					       vlan_id, is_kill);
10267 		writen_to_tbl = true;
10268 	}
10269 
10270 	if (!ret) {
10271 		if (is_kill)
10272 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10273 		else
10274 			hclge_add_vport_vlan_table(vport, vlan_id,
10275 						   writen_to_tbl);
10276 	} else if (is_kill) {
10277 		/* when removing the hw vlan filter failed, record the vlan id,
10278 		 * and try to remove it from hw later, to be consistent
10279 		 * with the stack
10280 		 */
10281 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10282 	}
10283 	return ret;
10284 }
10285 
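/* Retry removing the vlan ids whose hardware deletion failed earlier (recorded
 * in vlan_del_fail_bmap), handling at most HCLGE_MAX_SYNC_COUNT entries per
 * invocation.
 */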
10286 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10287 {
10288 #define HCLGE_MAX_SYNC_COUNT	60
10289 
10290 	int i, ret, sync_cnt = 0;
10291 	u16 vlan_id;
10292 
10293 	/* start from vport 1, since the PF is always alive */
10294 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10295 		struct hclge_vport *vport = &hdev->vport[i];
10296 
10297 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10298 					 VLAN_N_VID);
10299 		while (vlan_id != VLAN_N_VID) {
10300 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10301 						       vport->vport_id, vlan_id,
10302 						       true);
10303 			if (ret && ret != -EINVAL)
10304 				return;
10305 
10306 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10307 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10308 
10309 			sync_cnt++;
10310 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10311 				return;
10312 
10313 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10314 						 VLAN_N_VID);
10315 		}
10316 	}
10317 }
10318 
10319 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10320 {
10321 	struct hclge_config_max_frm_size_cmd *req;
10322 	struct hclge_desc desc;
10323 
10324 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10325 
10326 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10327 	req->max_frm_size = cpu_to_le16(new_mps);
10328 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10329 
10330 	return hclge_cmd_send(&hdev->hw, &desc, 1);
10331 }
10332 
10333 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10334 {
10335 	struct hclge_vport *vport = hclge_get_vport(handle);
10336 
10337 	return hclge_set_vport_mtu(vport, new_mtu);
10338 }
10339 
10340 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10341 {
10342 	struct hclge_dev *hdev = vport->back;
10343 	int i, max_frm_size, ret;
10344 
10345 	/* HW supports 2-layer vlan */
10346 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10347 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10348 	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10349 		return -EINVAL;
10350 
10351 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10352 	mutex_lock(&hdev->vport_lock);
10353 	/* VF's mps must fit within hdev->mps */
10354 	if (vport->vport_id && max_frm_size > hdev->mps) {
10355 		mutex_unlock(&hdev->vport_lock);
10356 		return -EINVAL;
10357 	} else if (vport->vport_id) {
10358 		vport->mps = max_frm_size;
10359 		mutex_unlock(&hdev->vport_lock);
10360 		return 0;
10361 	}
10362 
10363 	/* PF's mps must be greater than VF's mps */
10364 	for (i = 1; i < hdev->num_alloc_vport; i++)
10365 		if (max_frm_size < hdev->vport[i].mps) {
10366 			mutex_unlock(&hdev->vport_lock);
10367 			return -EINVAL;
10368 		}
10369 
10370 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10371 
10372 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
10373 	if (ret) {
10374 		dev_err(&hdev->pdev->dev,
10375 			"Change mtu fail, ret =%d\n", ret);
10376 		goto out;
10377 	}
10378 
10379 	hdev->mps = max_frm_size;
10380 	vport->mps = max_frm_size;
10381 
10382 	ret = hclge_buffer_alloc(hdev);
10383 	if (ret)
10384 		dev_err(&hdev->pdev->dev,
10385 			"Allocate buffer fail, ret =%d\n", ret);
10386 
10387 out:
10388 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10389 	mutex_unlock(&hdev->vport_lock);
10390 	return ret;
10391 }
10392 
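/* Send the per-queue reset command: enable=true asserts the queue reset
 * request, enable=false de-asserts it after the reset has completed.
 */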
10393 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10394 				    bool enable)
10395 {
10396 	struct hclge_reset_tqp_queue_cmd *req;
10397 	struct hclge_desc desc;
10398 	int ret;
10399 
10400 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10401 
10402 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10403 	req->tqp_id = cpu_to_le16(queue_id);
10404 	if (enable)
10405 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10406 
10407 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10408 	if (ret) {
10409 		dev_err(&hdev->pdev->dev,
10410 			"Send tqp reset cmd error, status =%d\n", ret);
10411 		return ret;
10412 	}
10413 
10414 	return 0;
10415 }
10416 
10417 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10418 {
10419 	struct hclge_reset_tqp_queue_cmd *req;
10420 	struct hclge_desc desc;
10421 	int ret;
10422 
10423 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10424 
10425 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10426 	req->tqp_id = cpu_to_le16(queue_id);
10427 
10428 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10429 	if (ret) {
10430 		dev_err(&hdev->pdev->dev,
10431 			"Get reset status error, status =%d\n", ret);
10432 		return ret;
10433 	}
10434 
10435 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10436 }
10437 
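/* Convert a queue id that is local to the handle into the global tqp index
 * used by the hardware commands.
 */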
10438 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10439 {
10440 	struct hnae3_queue *queue;
10441 	struct hclge_tqp *tqp;
10442 
10443 	queue = handle->kinfo.tqp[queue_id];
10444 	tqp = container_of(queue, struct hclge_tqp, q);
10445 
10446 	return tqp->index;
10447 }
10448 
10449 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10450 {
10451 	struct hclge_vport *vport = hclge_get_vport(handle);
10452 	struct hclge_dev *hdev = vport->back;
10453 	u16 reset_try_times = 0;
10454 	int reset_status;
10455 	u16 queue_gid;
10456 	int ret;
10457 	u16 i;
10458 
10459 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
10460 		queue_gid = hclge_covert_handle_qid_global(handle, i);
10461 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10462 		if (ret) {
10463 			dev_err(&hdev->pdev->dev,
10464 				"failed to send reset tqp cmd, ret = %d\n",
10465 				ret);
10466 			return ret;
10467 		}
10468 
10469 		while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10470 			reset_status = hclge_get_reset_status(hdev, queue_gid);
10471 			if (reset_status)
10472 				break;
10473 
10474 			/* Wait for tqp hw reset */
10475 			usleep_range(1000, 1200);
10476 		}
10477 
10478 		if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10479 			dev_err(&hdev->pdev->dev,
10480 				"wait for tqp hw reset timeout\n");
10481 			return -ETIME;
10482 		}
10483 
10484 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10485 		if (ret) {
10486 			dev_err(&hdev->pdev->dev,
10487 				"failed to deassert soft reset, ret = %d\n",
10488 				ret);
10489 			return ret;
10490 		}
10491 		reset_try_times = 0;
10492 	}
10493 	return 0;
10494 }
10495 
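/* Reset all RCBs of the handle with a single command. If the firmware reports
 * that the command is not supported, fall back to resetting each tqp
 * individually via hclge_reset_tqp_cmd().
 */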
10496 static int hclge_reset_rcb(struct hnae3_handle *handle)
10497 {
10498 #define HCLGE_RESET_RCB_NOT_SUPPORT	0U
10499 #define HCLGE_RESET_RCB_SUCCESS		1U
10500 
10501 	struct hclge_vport *vport = hclge_get_vport(handle);
10502 	struct hclge_dev *hdev = vport->back;
10503 	struct hclge_reset_cmd *req;
10504 	struct hclge_desc desc;
10505 	u8 return_status;
10506 	u16 queue_gid;
10507 	int ret;
10508 
10509 	queue_gid = hclge_covert_handle_qid_global(handle, 0);
10510 
10511 	req = (struct hclge_reset_cmd *)desc.data;
10512 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10513 	hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10514 	req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10515 	req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10516 
10517 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10518 	if (ret) {
10519 		dev_err(&hdev->pdev->dev,
10520 			"failed to send rcb reset cmd, ret = %d\n", ret);
10521 		return ret;
10522 	}
10523 
10524 	return_status = req->fun_reset_rcb_return_status;
10525 	if (return_status == HCLGE_RESET_RCB_SUCCESS)
10526 		return 0;
10527 
10528 	if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10529 		dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10530 			return_status);
10531 		return -EIO;
10532 	}
10533 
10534 	/* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10535 	 * again to reset all tqps
10536 	 */
10537 	return hclge_reset_tqp_cmd(handle);
10538 }
10539 
10540 int hclge_reset_tqp(struct hnae3_handle *handle)
10541 {
10542 	struct hclge_vport *vport = hclge_get_vport(handle);
10543 	struct hclge_dev *hdev = vport->back;
10544 	int ret;
10545 
10546 	/* only need to disable PF's tqp */
10547 	if (!vport->vport_id) {
10548 		ret = hclge_tqp_enable(handle, false);
10549 		if (ret) {
10550 			dev_err(&hdev->pdev->dev,
10551 				"failed to disable tqp, ret = %d\n", ret);
10552 			return ret;
10553 		}
10554 	}
10555 
10556 	return hclge_reset_rcb(handle);
10557 }
10558 
10559 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10560 {
10561 	struct hclge_vport *vport = hclge_get_vport(handle);
10562 	struct hclge_dev *hdev = vport->back;
10563 
10564 	return hdev->fw_version;
10565 }
10566 
10567 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10568 {
10569 	struct phy_device *phydev = hdev->hw.mac.phydev;
10570 
10571 	if (!phydev)
10572 		return;
10573 
10574 	phy_set_asym_pause(phydev, rx_en, tx_en);
10575 }
10576 
10577 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10578 {
10579 	int ret;
10580 
10581 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10582 		return 0;
10583 
10584 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10585 	if (ret)
10586 		dev_err(&hdev->pdev->dev,
10587 			"configure pauseparam error, ret = %d.\n", ret);
10588 
10589 	return ret;
10590 }
10591 
10592 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10593 {
10594 	struct phy_device *phydev = hdev->hw.mac.phydev;
10595 	u16 remote_advertising = 0;
10596 	u16 local_advertising;
10597 	u32 rx_pause, tx_pause;
10598 	u8 flowctl;
10599 
10600 	if (!phydev->link || !phydev->autoneg)
10601 		return 0;
10602 
10603 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10604 
10605 	if (phydev->pause)
10606 		remote_advertising = LPA_PAUSE_CAP;
10607 
10608 	if (phydev->asym_pause)
10609 		remote_advertising |= LPA_PAUSE_ASYM;
10610 
10611 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10612 					   remote_advertising);
10613 	tx_pause = flowctl & FLOW_CTRL_TX;
10614 	rx_pause = flowctl & FLOW_CTRL_RX;
10615 
10616 	if (phydev->duplex == HCLGE_MAC_HALF) {
10617 		tx_pause = 0;
10618 		rx_pause = 0;
10619 	}
10620 
10621 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10622 }
10623 
10624 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10625 				 u32 *rx_en, u32 *tx_en)
10626 {
10627 	struct hclge_vport *vport = hclge_get_vport(handle);
10628 	struct hclge_dev *hdev = vport->back;
10629 	u8 media_type = hdev->hw.mac.media_type;
10630 
10631 	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10632 		    hclge_get_autoneg(handle) : 0;
10633 
10634 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10635 		*rx_en = 0;
10636 		*tx_en = 0;
10637 		return;
10638 	}
10639 
10640 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10641 		*rx_en = 1;
10642 		*tx_en = 0;
10643 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10644 		*tx_en = 1;
10645 		*rx_en = 0;
10646 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10647 		*rx_en = 1;
10648 		*tx_en = 1;
10649 	} else {
10650 		*rx_en = 0;
10651 		*tx_en = 0;
10652 	}
10653 }
10654 
10655 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10656 					 u32 rx_en, u32 tx_en)
10657 {
10658 	if (rx_en && tx_en)
10659 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
10660 	else if (rx_en && !tx_en)
10661 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10662 	else if (!rx_en && tx_en)
10663 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10664 	else
10665 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
10666 
10667 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10668 }
10669 
10670 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10671 				u32 rx_en, u32 tx_en)
10672 {
10673 	struct hclge_vport *vport = hclge_get_vport(handle);
10674 	struct hclge_dev *hdev = vport->back;
10675 	struct phy_device *phydev = hdev->hw.mac.phydev;
10676 	u32 fc_autoneg;
10677 
10678 	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10679 		fc_autoneg = hclge_get_autoneg(handle);
10680 		if (auto_neg != fc_autoneg) {
10681 			dev_info(&hdev->pdev->dev,
10682 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10683 			return -EOPNOTSUPP;
10684 		}
10685 	}
10686 
10687 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10688 		dev_info(&hdev->pdev->dev,
10689 			 "Priority flow control enabled. Cannot set link flow control.\n");
10690 		return -EOPNOTSUPP;
10691 	}
10692 
10693 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10694 
10695 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10696 
10697 	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10698 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10699 
10700 	if (phydev)
10701 		return phy_start_aneg(phydev);
10702 
10703 	return -EOPNOTSUPP;
10704 }
10705 
10706 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10707 					  u8 *auto_neg, u32 *speed, u8 *duplex)
10708 {
10709 	struct hclge_vport *vport = hclge_get_vport(handle);
10710 	struct hclge_dev *hdev = vport->back;
10711 
10712 	if (speed)
10713 		*speed = hdev->hw.mac.speed;
10714 	if (duplex)
10715 		*duplex = hdev->hw.mac.duplex;
10716 	if (auto_neg)
10717 		*auto_neg = hdev->hw.mac.autoneg;
10718 }
10719 
10720 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10721 				 u8 *module_type)
10722 {
10723 	struct hclge_vport *vport = hclge_get_vport(handle);
10724 	struct hclge_dev *hdev = vport->back;
10725 
10726 	/* When the nic is down, the service task is not running and does not
10727 	 * update the port information every second. Query the port information
10728 	 * before returning the media type, to ensure the media info is correct.
10729 	 */
10730 	hclge_update_port_info(hdev);
10731 
10732 	if (media_type)
10733 		*media_type = hdev->hw.mac.media_type;
10734 
10735 	if (module_type)
10736 		*module_type = hdev->hw.mac.module_type;
10737 }
10738 
10739 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10740 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
10741 {
10742 	struct hclge_vport *vport = hclge_get_vport(handle);
10743 	struct hclge_dev *hdev = vport->back;
10744 	struct phy_device *phydev = hdev->hw.mac.phydev;
10745 	int mdix_ctrl, mdix, is_resolved;
10746 	unsigned int retval;
10747 
10748 	if (!phydev) {
10749 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10750 		*tp_mdix = ETH_TP_MDI_INVALID;
10751 		return;
10752 	}
10753 
10754 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10755 
10756 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10757 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10758 				    HCLGE_PHY_MDIX_CTRL_S);
10759 
10760 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10761 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10762 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10763 
10764 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10765 
10766 	switch (mdix_ctrl) {
10767 	case 0x0:
10768 		*tp_mdix_ctrl = ETH_TP_MDI;
10769 		break;
10770 	case 0x1:
10771 		*tp_mdix_ctrl = ETH_TP_MDI_X;
10772 		break;
10773 	case 0x3:
10774 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10775 		break;
10776 	default:
10777 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10778 		break;
10779 	}
10780 
10781 	if (!is_resolved)
10782 		*tp_mdix = ETH_TP_MDI_INVALID;
10783 	else if (mdix)
10784 		*tp_mdix = ETH_TP_MDI_X;
10785 	else
10786 		*tp_mdix = ETH_TP_MDI;
10787 }
10788 
10789 static void hclge_info_show(struct hclge_dev *hdev)
10790 {
10791 	struct device *dev = &hdev->pdev->dev;
10792 
10793 	dev_info(dev, "PF info begin:\n");
10794 
10795 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10796 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10797 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10798 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10799 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10800 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10801 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10802 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10803 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10804 	dev_info(dev, "This is %s PF\n",
10805 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10806 	dev_info(dev, "DCB %s\n",
10807 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10808 	dev_info(dev, "MQPRIO %s\n",
10809 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10810 
10811 	dev_info(dev, "PF info end.\n");
10812 }
10813 
10814 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10815 					  struct hclge_vport *vport)
10816 {
10817 	struct hnae3_client *client = vport->nic.client;
10818 	struct hclge_dev *hdev = ae_dev->priv;
10819 	int rst_cnt = hdev->rst_stats.reset_cnt;
10820 	int ret;
10821 
10822 	ret = client->ops->init_instance(&vport->nic);
10823 	if (ret)
10824 		return ret;
10825 
10826 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10827 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10828 	    rst_cnt != hdev->rst_stats.reset_cnt) {
10829 		ret = -EBUSY;
10830 		goto init_nic_err;
10831 	}
10832 
10833 	/* Enable nic hw error interrupts */
10834 	ret = hclge_config_nic_hw_error(hdev, true);
10835 	if (ret) {
10836 		dev_err(&ae_dev->pdev->dev,
10837 			"fail(%d) to enable hw error interrupts\n", ret);
10838 		goto init_nic_err;
10839 	}
10840 
10841 	hnae3_set_client_init_flag(client, ae_dev, 1);
10842 
10843 	if (netif_msg_drv(&hdev->vport->nic))
10844 		hclge_info_show(hdev);
10845 
10846 	return ret;
10847 
10848 init_nic_err:
10849 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10850 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10851 		msleep(HCLGE_WAIT_RESET_DONE);
10852 
10853 	client->ops->uninit_instance(&vport->nic, 0);
10854 
10855 	return ret;
10856 }
10857 
10858 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10859 					   struct hclge_vport *vport)
10860 {
10861 	struct hclge_dev *hdev = ae_dev->priv;
10862 	struct hnae3_client *client;
10863 	int rst_cnt;
10864 	int ret;
10865 
10866 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10867 	    !hdev->nic_client)
10868 		return 0;
10869 
10870 	client = hdev->roce_client;
10871 	ret = hclge_init_roce_base_info(vport);
10872 	if (ret)
10873 		return ret;
10874 
10875 	rst_cnt = hdev->rst_stats.reset_cnt;
10876 	ret = client->ops->init_instance(&vport->roce);
10877 	if (ret)
10878 		return ret;
10879 
10880 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10881 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10882 	    rst_cnt != hdev->rst_stats.reset_cnt) {
10883 		ret = -EBUSY;
10884 		goto init_roce_err;
10885 	}
10886 
10887 	/* Enable roce ras interrupts */
10888 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
10889 	if (ret) {
10890 		dev_err(&ae_dev->pdev->dev,
10891 			"fail(%d) to enable roce ras interrupts\n", ret);
10892 		goto init_roce_err;
10893 	}
10894 
10895 	hnae3_set_client_init_flag(client, ae_dev, 1);
10896 
10897 	return 0;
10898 
10899 init_roce_err:
10900 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10901 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10902 		msleep(HCLGE_WAIT_RESET_DONE);
10903 
10904 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10905 
10906 	return ret;
10907 }
10908 
10909 static int hclge_init_client_instance(struct hnae3_client *client,
10910 				      struct hnae3_ae_dev *ae_dev)
10911 {
10912 	struct hclge_dev *hdev = ae_dev->priv;
10913 	struct hclge_vport *vport = &hdev->vport[0];
10914 	int ret;
10915 
10916 	switch (client->type) {
10917 	case HNAE3_CLIENT_KNIC:
10918 		hdev->nic_client = client;
10919 		vport->nic.client = client;
10920 		ret = hclge_init_nic_client_instance(ae_dev, vport);
10921 		if (ret)
10922 			goto clear_nic;
10923 
10924 		ret = hclge_init_roce_client_instance(ae_dev, vport);
10925 		if (ret)
10926 			goto clear_roce;
10927 
10928 		break;
10929 	case HNAE3_CLIENT_ROCE:
10930 		if (hnae3_dev_roce_supported(hdev)) {
10931 			hdev->roce_client = client;
10932 			vport->roce.client = client;
10933 		}
10934 
10935 		ret = hclge_init_roce_client_instance(ae_dev, vport);
10936 		if (ret)
10937 			goto clear_roce;
10938 
10939 		break;
10940 	default:
10941 		return -EINVAL;
10942 	}
10943 
10944 	return 0;
10945 
10946 clear_nic:
10947 	hdev->nic_client = NULL;
10948 	vport->nic.client = NULL;
10949 	return ret;
10950 clear_roce:
10951 	hdev->roce_client = NULL;
10952 	vport->roce.client = NULL;
10953 	return ret;
10954 }
10955 
10956 static void hclge_uninit_client_instance(struct hnae3_client *client,
10957 					 struct hnae3_ae_dev *ae_dev)
10958 {
10959 	struct hclge_dev *hdev = ae_dev->priv;
10960 	struct hclge_vport *vport = &hdev->vport[0];
10961 
10962 	if (hdev->roce_client) {
10963 		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10964 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10965 			msleep(HCLGE_WAIT_RESET_DONE);
10966 
10967 		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10968 		hdev->roce_client = NULL;
10969 		vport->roce.client = NULL;
10970 	}
10971 	if (client->type == HNAE3_CLIENT_ROCE)
10972 		return;
10973 	if (hdev->nic_client && client->ops->uninit_instance) {
10974 		clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10975 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10976 			msleep(HCLGE_WAIT_RESET_DONE);
10977 
10978 		client->ops->uninit_instance(&vport->nic, 0);
10979 		hdev->nic_client = NULL;
10980 		vport->nic.client = NULL;
10981 	}
10982 }
10983 
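/* Map the optional device memory BAR (HCLGE_MEM_BAR) with write-combining;
 * devices without this BAR simply skip the mapping.
 */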
10984 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10985 {
10986 #define HCLGE_MEM_BAR		4
10987 
10988 	struct pci_dev *pdev = hdev->pdev;
10989 	struct hclge_hw *hw = &hdev->hw;
10990 
10991 	/* if the device does not have device memory, return directly */
10992 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10993 		return 0;
10994 
10995 	hw->mem_base = devm_ioremap_wc(&pdev->dev,
10996 				       pci_resource_start(pdev, HCLGE_MEM_BAR),
10997 				       pci_resource_len(pdev, HCLGE_MEM_BAR));
10998 	if (!hw->mem_base) {
10999 		dev_err(&pdev->dev, "failed to map device memory\n");
11000 		return -EFAULT;
11001 	}
11002 
11003 	return 0;
11004 }
11005 
11006 static int hclge_pci_init(struct hclge_dev *hdev)
11007 {
11008 	struct pci_dev *pdev = hdev->pdev;
11009 	struct hclge_hw *hw;
11010 	int ret;
11011 
11012 	ret = pci_enable_device(pdev);
11013 	if (ret) {
11014 		dev_err(&pdev->dev, "failed to enable PCI device\n");
11015 		return ret;
11016 	}
11017 
11018 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11019 	if (ret) {
11020 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11021 		if (ret) {
11022 			dev_err(&pdev->dev,
11023 				"can't set consistent PCI DMA");
11024 			goto err_disable_device;
11025 		}
11026 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11027 	}
11028 
11029 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11030 	if (ret) {
11031 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11032 		goto err_disable_device;
11033 	}
11034 
11035 	pci_set_master(pdev);
11036 	hw = &hdev->hw;
11037 	hw->io_base = pcim_iomap(pdev, 2, 0);
11038 	if (!hw->io_base) {
11039 		dev_err(&pdev->dev, "Can't map configuration register space\n");
11040 		ret = -ENOMEM;
11041 		goto err_clr_master;
11042 	}
11043 
11044 	ret = hclge_dev_mem_map(hdev);
11045 	if (ret)
11046 		goto err_unmap_io_base;
11047 
11048 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11049 
11050 	return 0;
11051 
11052 err_unmap_io_base:
11053 	pcim_iounmap(pdev, hdev->hw.io_base);
11054 err_clr_master:
11055 	pci_clear_master(pdev);
11056 	pci_release_regions(pdev);
11057 err_disable_device:
11058 	pci_disable_device(pdev);
11059 
11060 	return ret;
11061 }
11062 
11063 static void hclge_pci_uninit(struct hclge_dev *hdev)
11064 {
11065 	struct pci_dev *pdev = hdev->pdev;
11066 
11067 	if (hdev->hw.mem_base)
11068 		devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11069 
11070 	pcim_iounmap(pdev, hdev->hw.io_base);
11071 	pci_free_irq_vectors(pdev);
11072 	pci_clear_master(pdev);
11073 	pci_release_mem_regions(pdev);
11074 	pci_disable_device(pdev);
11075 }
11076 
11077 static void hclge_state_init(struct hclge_dev *hdev)
11078 {
11079 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11080 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11081 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11082 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11083 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11084 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11085 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11086 }
11087 
11088 static void hclge_state_uninit(struct hclge_dev *hdev)
11089 {
11090 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11091 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11092 
11093 	if (hdev->reset_timer.function)
11094 		del_timer_sync(&hdev->reset_timer);
11095 	if (hdev->service_task.work.func)
11096 		cancel_delayed_work_sync(&hdev->service_task);
11097 }
11098 
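/* Prepare the device for an externally triggered reset (e.g. FLR). If the
 * preparation fails, release the reset semaphore and retry after
 * HCLGE_RESET_RETRY_WAIT_MS, either while another reset is still pending or
 * up to HCLGE_RESET_RETRY_CNT times otherwise.
 */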
11099 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11100 					enum hnae3_reset_type rst_type)
11101 {
11102 #define HCLGE_RESET_RETRY_WAIT_MS	500
11103 #define HCLGE_RESET_RETRY_CNT	5
11104 
11105 	struct hclge_dev *hdev = ae_dev->priv;
11106 	int retry_cnt = 0;
11107 	int ret;
11108 
11109 retry:
11110 	down(&hdev->reset_sem);
11111 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11112 	hdev->reset_type = rst_type;
11113 	ret = hclge_reset_prepare(hdev);
11114 	if (ret || hdev->reset_pending) {
11115 		dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11116 			ret);
11117 		if (hdev->reset_pending ||
11118 		    retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11119 			dev_err(&hdev->pdev->dev,
11120 				"reset_pending:0x%lx, retry_cnt:%d\n",
11121 				hdev->reset_pending, retry_cnt);
11122 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11123 			up(&hdev->reset_sem);
11124 			msleep(HCLGE_RESET_RETRY_WAIT_MS);
11125 			goto retry;
11126 		}
11127 	}
11128 
11129 	/* disable misc vector before reset done */
11130 	hclge_enable_vector(&hdev->misc_vector, false);
11131 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11132 
11133 	if (hdev->reset_type == HNAE3_FLR_RESET)
11134 		hdev->rst_stats.flr_rst_cnt++;
11135 }
11136 
11137 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11138 {
11139 	struct hclge_dev *hdev = ae_dev->priv;
11140 	int ret;
11141 
11142 	hclge_enable_vector(&hdev->misc_vector, true);
11143 
11144 	ret = hclge_reset_rebuild(hdev);
11145 	if (ret)
11146 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11147 
11148 	hdev->reset_type = HNAE3_NONE_RESET;
11149 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11150 	up(&hdev->reset_sem);
11151 }
11152 
11153 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11154 {
11155 	u16 i;
11156 
11157 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11158 		struct hclge_vport *vport = &hdev->vport[i];
11159 		int ret;
11160 
11161 		 /* Send cmd to clear VF's FUNC_RST_ING */
11162 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11163 		if (ret)
11164 			dev_warn(&hdev->pdev->dev,
11165 				 "clear vf(%u) rst failed %d!\n",
11166 				 vport->vport_id, ret);
11167 	}
11168 }
11169 
11170 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11171 {
11172 	struct pci_dev *pdev = ae_dev->pdev;
11173 	struct hclge_dev *hdev;
11174 	int ret;
11175 
11176 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11177 	if (!hdev)
11178 		return -ENOMEM;
11179 
11180 	hdev->pdev = pdev;
11181 	hdev->ae_dev = ae_dev;
11182 	hdev->reset_type = HNAE3_NONE_RESET;
11183 	hdev->reset_level = HNAE3_FUNC_RESET;
11184 	ae_dev->priv = hdev;
11185 
11186 	/* HW supports 2-layer vlan */
11187 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11188 
11189 	mutex_init(&hdev->vport_lock);
11190 	spin_lock_init(&hdev->fd_rule_lock);
11191 	sema_init(&hdev->reset_sem, 1);
11192 
11193 	ret = hclge_pci_init(hdev);
11194 	if (ret)
11195 		goto out;
11196 
11197 	/* Initialize the firmware command queue */
11198 	ret = hclge_cmd_queue_init(hdev);
11199 	if (ret)
11200 		goto err_pci_uninit;
11201 
11202 	/* Initialize the firmware command */
11203 	ret = hclge_cmd_init(hdev);
11204 	if (ret)
11205 		goto err_cmd_uninit;
11206 
11207 	ret = hclge_get_cap(hdev);
11208 	if (ret)
11209 		goto err_cmd_uninit;
11210 
11211 	ret = hclge_query_dev_specs(hdev);
11212 	if (ret) {
11213 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11214 			ret);
11215 		goto err_cmd_uninit;
11216 	}
11217 
11218 	ret = hclge_configure(hdev);
11219 	if (ret) {
11220 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11221 		goto err_cmd_uninit;
11222 	}
11223 
11224 	ret = hclge_init_msi(hdev);
11225 	if (ret) {
11226 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11227 		goto err_cmd_uninit;
11228 	}
11229 
11230 	ret = hclge_misc_irq_init(hdev);
11231 	if (ret)
11232 		goto err_msi_uninit;
11233 
11234 	ret = hclge_alloc_tqps(hdev);
11235 	if (ret) {
11236 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11237 		goto err_msi_irq_uninit;
11238 	}
11239 
11240 	ret = hclge_alloc_vport(hdev);
11241 	if (ret)
11242 		goto err_msi_irq_uninit;
11243 
11244 	ret = hclge_map_tqp(hdev);
11245 	if (ret)
11246 		goto err_msi_irq_uninit;
11247 
11248 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11249 	    !hnae3_dev_phy_imp_supported(hdev)) {
11250 		ret = hclge_mac_mdio_config(hdev);
11251 		if (ret)
11252 			goto err_msi_irq_uninit;
11253 	}
11254 
11255 	ret = hclge_init_umv_space(hdev);
11256 	if (ret)
11257 		goto err_mdiobus_unreg;
11258 
11259 	ret = hclge_mac_init(hdev);
11260 	if (ret) {
11261 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11262 		goto err_mdiobus_unreg;
11263 	}
11264 
11265 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11266 	if (ret) {
11267 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11268 		goto err_mdiobus_unreg;
11269 	}
11270 
11271 	ret = hclge_config_gro(hdev, true);
11272 	if (ret)
11273 		goto err_mdiobus_unreg;
11274 
11275 	ret = hclge_init_vlan_config(hdev);
11276 	if (ret) {
11277 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11278 		goto err_mdiobus_unreg;
11279 	}
11280 
11281 	ret = hclge_tm_schd_init(hdev);
11282 	if (ret) {
11283 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11284 		goto err_mdiobus_unreg;
11285 	}
11286 
11287 	ret = hclge_rss_init_cfg(hdev);
11288 	if (ret) {
11289 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11290 		goto err_mdiobus_unreg;
11291 	}
11292 
11293 	ret = hclge_rss_init_hw(hdev);
11294 	if (ret) {
11295 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11296 		goto err_mdiobus_unreg;
11297 	}
11298 
11299 	ret = init_mgr_tbl(hdev);
11300 	if (ret) {
11301 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11302 		goto err_mdiobus_unreg;
11303 	}
11304 
11305 	ret = hclge_init_fd_config(hdev);
11306 	if (ret) {
11307 		dev_err(&pdev->dev,
11308 			"fd table init fail, ret=%d\n", ret);
11309 		goto err_mdiobus_unreg;
11310 	}
11311 
11312 	INIT_KFIFO(hdev->mac_tnl_log);
11313 
11314 	hclge_dcb_ops_set(hdev);
11315 
11316 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11317 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11318 
11319 	/* Set up affinity after the service timer setup because add_timer_on
11320 	 * is called from the affinity notify handler.
11321 	 */
11322 	hclge_misc_affinity_setup(hdev);
11323 
11324 	hclge_clear_all_event_cause(hdev);
11325 	hclge_clear_resetting_state(hdev);
11326 
11327 	/* Log and clear the hw errors that have already occurred */
11328 	hclge_handle_all_hns_hw_errors(ae_dev);
11329 
11330 	/* Request a delayed reset for the error recovery, because an immediate
11331 	 * global reset on a PF may affect the pending initialization of other PFs.
11332 	 */
11333 	if (ae_dev->hw_err_reset_req) {
11334 		enum hnae3_reset_type reset_level;
11335 
11336 		reset_level = hclge_get_reset_level(ae_dev,
11337 						    &ae_dev->hw_err_reset_req);
11338 		hclge_set_def_reset_request(ae_dev, reset_level);
11339 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11340 	}
11341 
11342 	/* Enable MISC vector(vector0) */
11343 	hclge_enable_vector(&hdev->misc_vector, true);
11344 
11345 	hclge_state_init(hdev);
11346 	hdev->last_reset_time = jiffies;
11347 
11348 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11349 		 HCLGE_DRIVER_NAME);
11350 
11351 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11352 
11353 	return 0;
11354 
11355 err_mdiobus_unreg:
11356 	if (hdev->hw.mac.phydev)
11357 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
11358 err_msi_irq_uninit:
11359 	hclge_misc_irq_uninit(hdev);
11360 err_msi_uninit:
11361 	pci_free_irq_vectors(pdev);
11362 err_cmd_uninit:
11363 	hclge_cmd_uninit(hdev);
11364 err_pci_uninit:
11365 	pcim_iounmap(pdev, hdev->hw.io_base);
11366 	pci_clear_master(pdev);
11367 	pci_release_regions(pdev);
11368 	pci_disable_device(pdev);
11369 out:
11370 	mutex_destroy(&hdev->vport_lock);
11371 	return ret;
11372 }
11373 
11374 static void hclge_stats_clear(struct hclge_dev *hdev)
11375 {
11376 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11377 }
11378 
11379 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11380 {
11381 	return hclge_config_switch_param(hdev, vf, enable,
11382 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11383 }
11384 
11385 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11386 {
11387 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11388 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
11389 					  enable, vf);
11390 }
11391 
11392 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11393 {
11394 	int ret;
11395 
11396 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11397 	if (ret) {
11398 		dev_err(&hdev->pdev->dev,
11399 			"Set vf %d mac spoof check %s failed, ret=%d\n",
11400 			vf, enable ? "on" : "off", ret);
11401 		return ret;
11402 	}
11403 
11404 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11405 	if (ret)
11406 		dev_err(&hdev->pdev->dev,
11407 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
11408 			vf, enable ? "on" : "off", ret);
11409 
11410 	return ret;
11411 }
11412 
11413 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11414 				 bool enable)
11415 {
11416 	struct hclge_vport *vport = hclge_get_vport(handle);
11417 	struct hclge_dev *hdev = vport->back;
11418 	u32 new_spoofchk = enable ? 1 : 0;
11419 	int ret;
11420 
11421 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11422 		return -EOPNOTSUPP;
11423 
11424 	vport = hclge_get_vf_vport(hdev, vf);
11425 	if (!vport)
11426 		return -EINVAL;
11427 
11428 	if (vport->vf_info.spoofchk == new_spoofchk)
11429 		return 0;
11430 
11431 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11432 		dev_warn(&hdev->pdev->dev,
11433 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11434 			 vf);
11435 	else if (enable && hclge_is_umv_space_full(vport, true))
11436 		dev_warn(&hdev->pdev->dev,
11437 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11438 			 vf);
11439 
11440 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11441 	if (ret)
11442 		return ret;
11443 
11444 	vport->vf_info.spoofchk = new_spoofchk;
11445 	return 0;
11446 }
11447 
11448 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11449 {
11450 	struct hclge_vport *vport = hdev->vport;
11451 	int ret;
11452 	int i;
11453 
11454 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11455 		return 0;
11456 
11457 	/* resume the vf spoof check state after reset */
11458 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11459 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11460 					       vport->vf_info.spoofchk);
11461 		if (ret)
11462 			return ret;
11463 
11464 		vport++;
11465 	}
11466 
11467 	return 0;
11468 }
11469 
11470 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11471 {
11472 	struct hclge_vport *vport = hclge_get_vport(handle);
11473 	struct hclge_dev *hdev = vport->back;
11474 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
11475 	u32 new_trusted = enable ? 1 : 0;
11476 	bool en_bc_pmc;
11477 	int ret;
11478 
11479 	vport = hclge_get_vf_vport(hdev, vf);
11480 	if (!vport)
11481 		return -EINVAL;
11482 
11483 	if (vport->vf_info.trusted == new_trusted)
11484 		return 0;
11485 
11486 	/* Disable promisc mode for VF if it is not trusted any more. */
11487 	if (!enable && vport->vf_info.promisc_enable) {
11488 		en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
11489 		ret = hclge_set_vport_promisc_mode(vport, false, false,
11490 						   en_bc_pmc);
11491 		if (ret)
11492 			return ret;
11493 		vport->vf_info.promisc_enable = 0;
11494 		hclge_inform_vf_promisc_info(vport);
11495 	}
11496 
11497 	vport->vf_info.trusted = new_trusted;
11498 
11499 	return 0;
11500 }
11501 
11502 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11503 {
11504 	int ret;
11505 	int vf;
11506 
11507 	/* reset vf rate to default value */
11508 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11509 		struct hclge_vport *vport = &hdev->vport[vf];
11510 
11511 		vport->vf_info.max_tx_rate = 0;
11512 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11513 		if (ret)
11514 			dev_err(&hdev->pdev->dev,
11515 				"vf%d failed to reset to default, ret=%d\n",
11516 				vf - HCLGE_VF_VPORT_START_NUM, ret);
11517 	}
11518 }
11519 
11520 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11521 				     int min_tx_rate, int max_tx_rate)
11522 {
11523 	if (min_tx_rate != 0 ||
11524 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11525 		dev_err(&hdev->pdev->dev,
11526 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11527 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11528 		return -EINVAL;
11529 	}
11530 
11531 	return 0;
11532 }
11533 
11534 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11535 			     int min_tx_rate, int max_tx_rate, bool force)
11536 {
11537 	struct hclge_vport *vport = hclge_get_vport(handle);
11538 	struct hclge_dev *hdev = vport->back;
11539 	int ret;
11540 
11541 	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11542 	if (ret)
11543 		return ret;
11544 
11545 	vport = hclge_get_vf_vport(hdev, vf);
11546 	if (!vport)
11547 		return -EINVAL;
11548 
11549 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11550 		return 0;
11551 
11552 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11553 	if (ret)
11554 		return ret;
11555 
11556 	vport->vf_info.max_tx_rate = max_tx_rate;
11557 
11558 	return 0;
11559 }
11560 
11561 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11562 {
11563 	struct hnae3_handle *handle = &hdev->vport->nic;
11564 	struct hclge_vport *vport;
11565 	int ret;
11566 	int vf;
11567 
11568 	/* resume the vf max_tx_rate after reset */
11569 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11570 		vport = hclge_get_vf_vport(hdev, vf);
11571 		if (!vport)
11572 			return -EINVAL;
11573 
11574 		/* zero means max rate; after reset the firmware has already
11575 		 * set it to the max rate, so just continue.
11576 		 */
11577 		if (!vport->vf_info.max_tx_rate)
11578 			continue;
11579 
11580 		ret = hclge_set_vf_rate(handle, vf, 0,
11581 					vport->vf_info.max_tx_rate, true);
11582 		if (ret) {
11583 			dev_err(&hdev->pdev->dev,
11584 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
11585 				vf, vport->vf_info.max_tx_rate, ret);
11586 			return ret;
11587 		}
11588 	}
11589 
11590 	return 0;
11591 }
11592 
11593 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11594 {
11595 	struct hclge_vport *vport = hdev->vport;
11596 	int i;
11597 
11598 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11599 		hclge_vport_stop(vport);
11600 		vport++;
11601 	}
11602 }
11603 
11604 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11605 {
11606 	struct hclge_dev *hdev = ae_dev->priv;
11607 	struct pci_dev *pdev = ae_dev->pdev;
11608 	int ret;
11609 
11610 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11611 
11612 	hclge_stats_clear(hdev);
11613 	/* NOTE: a PF reset does not need to clear or restore the PF and VF
11614 	 * table entries, so do not clean the tables in memory here.
11615 	 */
11616 	if (hdev->reset_type == HNAE3_IMP_RESET ||
11617 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
11618 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11619 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11620 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11621 		hclge_reset_umv_space(hdev);
11622 	}
11623 
11624 	ret = hclge_cmd_init(hdev);
11625 	if (ret) {
11626 		dev_err(&pdev->dev, "Cmd queue init failed\n");
11627 		return ret;
11628 	}
11629 
11630 	ret = hclge_map_tqp(hdev);
11631 	if (ret) {
11632 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11633 		return ret;
11634 	}
11635 
11636 	ret = hclge_mac_init(hdev);
11637 	if (ret) {
11638 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11639 		return ret;
11640 	}
11641 
11642 	ret = hclge_tp_port_init(hdev);
11643 	if (ret) {
11644 		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11645 			ret);
11646 		return ret;
11647 	}
11648 
11649 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11650 	if (ret) {
11651 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11652 		return ret;
11653 	}
11654 
11655 	ret = hclge_config_gro(hdev, true);
11656 	if (ret)
11657 		return ret;
11658 
11659 	ret = hclge_init_vlan_config(hdev);
11660 	if (ret) {
11661 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11662 		return ret;
11663 	}
11664 
11665 	ret = hclge_tm_init_hw(hdev, true);
11666 	if (ret) {
11667 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11668 		return ret;
11669 	}
11670 
11671 	ret = hclge_rss_init_hw(hdev);
11672 	if (ret) {
11673 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11674 		return ret;
11675 	}
11676 
11677 	ret = init_mgr_tbl(hdev);
11678 	if (ret) {
11679 		dev_err(&pdev->dev,
11680 			"failed to reinit manager table, ret = %d\n", ret);
11681 		return ret;
11682 	}
11683 
11684 	ret = hclge_init_fd_config(hdev);
11685 	if (ret) {
11686 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11687 		return ret;
11688 	}
11689 
11690 	/* Log and clear the hw errors that have already occurred */
11691 	hclge_handle_all_hns_hw_errors(ae_dev);
11692 
11693 	/* Re-enable the hw error interrupts because
11694 	 * the interrupts get disabled on global reset.
11695 	 */
11696 	ret = hclge_config_nic_hw_error(hdev, true);
11697 	if (ret) {
11698 		dev_err(&pdev->dev,
11699 			"fail(%d) to re-enable NIC hw error interrupts\n",
11700 			ret);
11701 		return ret;
11702 	}
11703 
11704 	if (hdev->roce_client) {
11705 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
11706 		if (ret) {
11707 			dev_err(&pdev->dev,
11708 				"fail(%d) to re-enable roce ras interrupts\n",
11709 				ret);
11710 			return ret;
11711 		}
11712 	}
11713 
11714 	hclge_reset_vport_state(hdev);
11715 	ret = hclge_reset_vport_spoofchk(hdev);
11716 	if (ret)
11717 		return ret;
11718 
11719 	ret = hclge_resume_vf_rate(hdev);
11720 	if (ret)
11721 		return ret;
11722 
11723 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11724 		 HCLGE_DRIVER_NAME);
11725 
11726 	return 0;
11727 }
11728 
11729 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11730 {
11731 	struct hclge_dev *hdev = ae_dev->priv;
11732 	struct hclge_mac *mac = &hdev->hw.mac;
11733 
11734 	hclge_reset_vf_rate(hdev);
11735 	hclge_clear_vf_vlan(hdev);
11736 	hclge_misc_affinity_teardown(hdev);
11737 	hclge_state_uninit(hdev);
11738 	hclge_uninit_mac_table(hdev);
11739 	hclge_del_all_fd_entries(hdev);
11740 
11741 	if (mac->phydev)
11742 		mdiobus_unregister(mac->mdio_bus);
11743 
11744 	/* Disable MISC vector(vector0) */
11745 	hclge_enable_vector(&hdev->misc_vector, false);
11746 	synchronize_irq(hdev->misc_vector.vector_irq);
11747 
11748 	/* Disable all hw interrupts */
11749 	hclge_config_mac_tnl_int(hdev, false);
11750 	hclge_config_nic_hw_error(hdev, false);
11751 	hclge_config_rocee_ras_interrupt(hdev, false);
11752 
11753 	hclge_cmd_uninit(hdev);
11754 	hclge_misc_irq_uninit(hdev);
11755 	hclge_pci_uninit(hdev);
11756 	mutex_destroy(&hdev->vport_lock);
11757 	hclge_uninit_vport_vlan_table(hdev);
11758 	ae_dev->priv = NULL;
11759 }
11760 
11761 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11762 {
11763 	struct hclge_vport *vport = hclge_get_vport(handle);
11764 	struct hclge_dev *hdev = vport->back;
11765 
11766 	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11767 }
11768 
11769 static void hclge_get_channels(struct hnae3_handle *handle,
11770 			       struct ethtool_channels *ch)
11771 {
11772 	ch->max_combined = hclge_get_max_channels(handle);
11773 	ch->other_count = 1;
11774 	ch->max_other = 1;
11775 	ch->combined_count = handle->kinfo.rss_size;
11776 }
11777 
11778 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11779 					u16 *alloc_tqps, u16 *max_rss_size)
11780 {
11781 	struct hclge_vport *vport = hclge_get_vport(handle);
11782 	struct hclge_dev *hdev = vport->back;
11783 
11784 	*alloc_tqps = vport->alloc_tqps;
11785 	*max_rss_size = hdev->pf_rss_size_max;
11786 }
11787 
11788 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11789 			      bool rxfh_configured)
11790 {
11791 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11792 	struct hclge_vport *vport = hclge_get_vport(handle);
11793 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11794 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11795 	struct hclge_dev *hdev = vport->back;
11796 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11797 	u16 cur_rss_size = kinfo->rss_size;
11798 	u16 cur_tqps = kinfo->num_tqps;
11799 	u16 tc_valid[HCLGE_MAX_TC_NUM];
11800 	u16 roundup_size;
11801 	u32 *rss_indir;
11802 	unsigned int i;
11803 	int ret;
11804 
11805 	kinfo->req_rss_size = new_tqps_num;
11806 
11807 	ret = hclge_tm_vport_map_update(hdev);
11808 	if (ret) {
11809 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
11810 		return ret;
11811 	}
11812 
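	/* tc_size is passed to hclge_set_rss_tc_mode() as the log2 of the
	 * rss_size rounded up to the next power of two.
	 */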
11813 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
11814 	roundup_size = ilog2(roundup_size);
11815 	/* Set the RSS TC mode according to the new RSS size */
11816 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11817 		tc_valid[i] = 0;
11818 
11819 		if (!(hdev->hw_tc_map & BIT(i)))
11820 			continue;
11821 
11822 		tc_valid[i] = 1;
11823 		tc_size[i] = roundup_size;
11824 		tc_offset[i] = kinfo->rss_size * i;
11825 	}
11826 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11827 	if (ret)
11828 		return ret;
11829 
11830 	/* RSS indirection table has been configured by user */
11831 	if (rxfh_configured)
11832 		goto out;
11833 
11834 	/* Reinitialize the RSS indirection table according to the new RSS size */
11835 	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
11836 			    GFP_KERNEL);
11837 	if (!rss_indir)
11838 		return -ENOMEM;
11839 
11840 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
11841 		rss_indir[i] = i % kinfo->rss_size;
11842 
11843 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11844 	if (ret)
11845 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11846 			ret);
11847 
11848 	kfree(rss_indir);
11849 
11850 out:
11851 	if (!ret)
11852 		dev_info(&hdev->pdev->dev,
11853 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
11854 			 cur_rss_size, kinfo->rss_size,
11855 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
11856 
11857 	return ret;
11858 }
11859 
11860 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11861 			      u32 *regs_num_64_bit)
11862 {
11863 	struct hclge_desc desc;
11864 	u32 total_num;
11865 	int ret;
11866 
11867 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11868 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11869 	if (ret) {
11870 		dev_err(&hdev->pdev->dev,
11871 			"Query register number cmd failed, ret = %d.\n", ret);
11872 		return ret;
11873 	}
11874 
11875 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
11876 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
11877 
11878 	total_num = *regs_num_32_bit + *regs_num_64_bit;
11879 	if (!total_num)
11880 		return -EINVAL;
11881 
11882 	return 0;
11883 }
11884 
11885 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11886 				 void *data)
11887 {
11888 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
11889 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
11890 
11891 	struct hclge_desc *desc;
11892 	u32 *reg_val = data;
11893 	__le32 *desc_data;
11894 	int nodata_num;
11895 	int cmd_num;
11896 	int i, k, n;
11897 	int ret;
11898 
11899 	if (regs_num == 0)
11900 		return 0;
11901 
11902 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11903 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11904 			       HCLGE_32_BIT_REG_RTN_DATANUM);
11905 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11906 	if (!desc)
11907 		return -ENOMEM;
11908 
11909 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11910 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11911 	if (ret) {
11912 		dev_err(&hdev->pdev->dev,
11913 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
11914 		kfree(desc);
11915 		return ret;
11916 	}
11917 
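	/* The first BD holds HCLGE_32_BIT_DESC_NODATA_LEN words that are not
	 * register values, so fewer values are copied from it than from the
	 * following BDs.
	 */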
11918 	for (i = 0; i < cmd_num; i++) {
11919 		if (i == 0) {
11920 			desc_data = (__le32 *)(&desc[i].data[0]);
11921 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
11922 		} else {
11923 			desc_data = (__le32 *)(&desc[i]);
11924 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
11925 		}
11926 		for (k = 0; k < n; k++) {
11927 			*reg_val++ = le32_to_cpu(*desc_data++);
11928 
11929 			regs_num--;
11930 			if (!regs_num)
11931 				break;
11932 		}
11933 	}
11934 
11935 	kfree(desc);
11936 	return 0;
11937 }
11938 
11939 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11940 				 void *data)
11941 {
11942 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
11943 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
11944 
11945 	struct hclge_desc *desc;
11946 	u64 *reg_val = data;
11947 	__le64 *desc_data;
11948 	int nodata_len;
11949 	int cmd_num;
11950 	int i, k, n;
11951 	int ret;
11952 
11953 	if (regs_num == 0)
11954 		return 0;
11955 
11956 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
11957 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
11958 			       HCLGE_64_BIT_REG_RTN_DATANUM);
11959 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11960 	if (!desc)
11961 		return -ENOMEM;
11962 
11963 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
11964 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11965 	if (ret) {
11966 		dev_err(&hdev->pdev->dev,
11967 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
11968 		kfree(desc);
11969 		return ret;
11970 	}
11971 
11972 	for (i = 0; i < cmd_num; i++) {
11973 		if (i == 0) {
11974 			desc_data = (__le64 *)(&desc[i].data[0]);
11975 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
11976 		} else {
11977 			desc_data = (__le64 *)(&desc[i]);
11978 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
11979 		}
11980 		for (k = 0; k < n; k++) {
11981 			*reg_val++ = le64_to_cpu(*desc_data++);
11982 
11983 			regs_num--;
11984 			if (!regs_num)
11985 				break;
11986 		}
11987 	}
11988 
11989 	kfree(desc);
11990 	return 0;
11991 }
11992 
11993 #define MAX_SEPARATE_NUM	4
11994 #define SEPARATOR_VALUE		0xFDFCFBFA
11995 #define REG_NUM_PER_LINE	4
11996 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
11997 #define REG_SEPARATOR_LINE	1
11998 #define REG_NUM_REMAIN_MASK	3
11999 
12000 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12001 {
12002 	int i;
12003 
12004 	/* initialize command BD except the last one */
12005 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12006 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12007 					   true);
12008 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12009 	}
12010 
12011 	/* initialize the last command BD */
12012 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12013 
12014 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12015 }
12016 
12017 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12018 				    int *bd_num_list,
12019 				    u32 type_num)
12020 {
12021 	u32 entries_per_desc, desc_index, index, offset, i;
12022 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12023 	int ret;
12024 
12025 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
12026 	if (ret) {
12027 		dev_err(&hdev->pdev->dev,
12028 			"Get dfx bd num fail, status is %d.\n", ret);
12029 		return ret;
12030 	}
12031 
12032 	entries_per_desc = ARRAY_SIZE(desc[0].data);
12033 	for (i = 0; i < type_num; i++) {
12034 		offset = hclge_dfx_bd_offset_list[i];
12035 		index = offset % entries_per_desc;
12036 		desc_index = offset / entries_per_desc;
12037 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12038 	}
12039 
12040 	return ret;
12041 }
12042 
12043 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12044 				  struct hclge_desc *desc_src, int bd_num,
12045 				  enum hclge_opcode_type cmd)
12046 {
12047 	struct hclge_desc *desc = desc_src;
12048 	int i, ret;
12049 
12050 	hclge_cmd_setup_basic_desc(desc, cmd, true);
12051 	for (i = 0; i < bd_num - 1; i++) {
12052 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12053 		desc++;
12054 		hclge_cmd_setup_basic_desc(desc, cmd, true);
12055 	}
12056 
12057 	desc = desc_src;
12058 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12059 	if (ret)
12060 		dev_err(&hdev->pdev->dev,
12061 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12062 			cmd, ret);
12063 
12064 	return ret;
12065 }
12066 
12067 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12068 				    void *data)
12069 {
12070 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12071 	struct hclge_desc *desc = desc_src;
12072 	u32 *reg = data;
12073 
12074 	entries_per_desc = ARRAY_SIZE(desc->data);
12075 	reg_num = entries_per_desc * bd_num;
12076 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12077 	for (i = 0; i < reg_num; i++) {
12078 		index = i % entries_per_desc;
12079 		desc_index = i / entries_per_desc;
12080 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
12081 	}
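	/* Append separator values so that each register type's dump ends on a
	 * REG_NUM_PER_LINE boundary and is terminated by at least one
	 * separator entry.
	 */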
12082 	for (i = 0; i < separator_num; i++)
12083 		*reg++ = SEPARATOR_VALUE;
12084 
12085 	return reg_num + separator_num;
12086 }
12087 
12088 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12089 {
12090 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12091 	int data_len_per_desc, bd_num, i;
12092 	int *bd_num_list;
12093 	u32 data_len;
12094 	int ret;
12095 
12096 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12097 	if (!bd_num_list)
12098 		return -ENOMEM;
12099 
12100 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12101 	if (ret) {
12102 		dev_err(&hdev->pdev->dev,
12103 			"Get dfx reg bd num fail, status is %d.\n", ret);
12104 		goto out;
12105 	}
12106 
12107 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
12108 	*len = 0;
12109 	for (i = 0; i < dfx_reg_type_num; i++) {
12110 		bd_num = bd_num_list[i];
12111 		data_len = data_len_per_desc * bd_num;
12112 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12113 	}
12114 
12115 out:
12116 	kfree(bd_num_list);
12117 	return ret;
12118 }
12119 
12120 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12121 {
12122 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12123 	int bd_num, bd_num_max, buf_len, i;
12124 	struct hclge_desc *desc_src;
12125 	int *bd_num_list;
12126 	u32 *reg = data;
12127 	int ret;
12128 
12129 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12130 	if (!bd_num_list)
12131 		return -ENOMEM;
12132 
12133 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12134 	if (ret) {
12135 		dev_err(&hdev->pdev->dev,
12136 			"Get dfx reg bd num fail, status is %d.\n", ret);
12137 		goto out;
12138 	}
12139 
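	/* Allocate one descriptor buffer sized for the largest BD count among
	 * all DFX register types and reuse it for every query below.
	 */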
12140 	bd_num_max = bd_num_list[0];
12141 	for (i = 1; i < dfx_reg_type_num; i++)
12142 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12143 
12144 	buf_len = sizeof(*desc_src) * bd_num_max;
12145 	desc_src = kzalloc(buf_len, GFP_KERNEL);
12146 	if (!desc_src) {
12147 		ret = -ENOMEM;
12148 		goto out;
12149 	}
12150 
12151 	for (i = 0; i < dfx_reg_type_num; i++) {
12152 		bd_num = bd_num_list[i];
12153 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12154 					     hclge_dfx_reg_opcode_list[i]);
12155 		if (ret) {
12156 			dev_err(&hdev->pdev->dev,
12157 				"Get dfx reg fail, status is %d.\n", ret);
12158 			break;
12159 		}
12160 
12161 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12162 	}
12163 
12164 	kfree(desc_src);
12165 out:
12166 	kfree(bd_num_list);
12167 	return ret;
12168 }
12169 
12170 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12171 			      struct hnae3_knic_private_info *kinfo)
12172 {
12173 #define HCLGE_RING_REG_OFFSET		0x200
12174 #define HCLGE_RING_INT_REG_OFFSET	0x4
12175 
12176 	int i, j, reg_num, separator_num;
12177 	int data_num_sum;
12178 	u32 *reg = data;
12179 
12180 	/* fetch per-PF register values from the PF PCIe register space */
12181 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12182 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12183 	for (i = 0; i < reg_num; i++)
12184 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12185 	for (i = 0; i < separator_num; i++)
12186 		*reg++ = SEPARATOR_VALUE;
12187 	data_num_sum = reg_num + separator_num;
12188 
12189 	reg_num = ARRAY_SIZE(common_reg_addr_list);
12190 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12191 	for (i = 0; i < reg_num; i++)
12192 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12193 	for (i = 0; i < separator_num; i++)
12194 		*reg++ = SEPARATOR_VALUE;
12195 	data_num_sum += reg_num + separator_num;
12196 
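	/* Ring registers are replicated per TQP at a fixed stride of
	 * HCLGE_RING_REG_OFFSET from one queue to the next.
	 */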
12197 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
12198 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12199 	for (j = 0; j < kinfo->num_tqps; j++) {
12200 		for (i = 0; i < reg_num; i++)
12201 			*reg++ = hclge_read_dev(&hdev->hw,
12202 						ring_reg_addr_list[i] +
12203 						HCLGE_RING_REG_OFFSET * j);
12204 		for (i = 0; i < separator_num; i++)
12205 			*reg++ = SEPARATOR_VALUE;
12206 	}
12207 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12208 
12209 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12210 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12211 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
12212 		for (i = 0; i < reg_num; i++)
12213 			*reg++ = hclge_read_dev(&hdev->hw,
12214 						tqp_intr_reg_addr_list[i] +
12215 						HCLGE_RING_INT_REG_OFFSET * j);
12216 		for (i = 0; i < separator_num; i++)
12217 			*reg++ = SEPARATOR_VALUE;
12218 	}
12219 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12220 
12221 	return data_num_sum;
12222 }
12223 
12224 static int hclge_get_regs_len(struct hnae3_handle *handle)
12225 {
12226 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12227 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12228 	struct hclge_vport *vport = hclge_get_vport(handle);
12229 	struct hclge_dev *hdev = vport->back;
12230 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12231 	int regs_lines_32_bit, regs_lines_64_bit;
12232 	int ret;
12233 
12234 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12235 	if (ret) {
12236 		dev_err(&hdev->pdev->dev,
12237 			"Get register number failed, ret = %d.\n", ret);
12238 		return ret;
12239 	}
12240 
12241 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12242 	if (ret) {
12243 		dev_err(&hdev->pdev->dev,
12244 			"Get dfx reg len failed, ret = %d.\n", ret);
12245 		return ret;
12246 	}
12247 
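	/* Each register group is counted in whole REG_LEN_PER_LINE lines plus
	 * one separator line for the padding added when the registers are
	 * fetched.
	 */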
12248 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12249 		REG_SEPARATOR_LINE;
12250 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12251 		REG_SEPARATOR_LINE;
12252 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12253 		REG_SEPARATOR_LINE;
12254 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12255 		REG_SEPARATOR_LINE;
12256 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12257 		REG_SEPARATOR_LINE;
12258 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12259 		REG_SEPARATOR_LINE;
12260 
12261 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12262 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12263 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12264 }
12265 
12266 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12267 			   void *data)
12268 {
12269 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12270 	struct hclge_vport *vport = hclge_get_vport(handle);
12271 	struct hclge_dev *hdev = vport->back;
12272 	u32 regs_num_32_bit, regs_num_64_bit;
12273 	int i, reg_num, separator_num, ret;
12274 	u32 *reg = data;
12275 
12276 	*version = hdev->fw_version;
12277 
12278 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12279 	if (ret) {
12280 		dev_err(&hdev->pdev->dev,
12281 			"Get register number failed, ret = %d.\n", ret);
12282 		return;
12283 	}
12284 
12285 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12286 
12287 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12288 	if (ret) {
12289 		dev_err(&hdev->pdev->dev,
12290 			"Get 32 bit register failed, ret = %d.\n", ret);
12291 		return;
12292 	}
12293 	reg_num = regs_num_32_bit;
12294 	reg += reg_num;
12295 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12296 	for (i = 0; i < separator_num; i++)
12297 		*reg++ = SEPARATOR_VALUE;
12298 
12299 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12300 	if (ret) {
12301 		dev_err(&hdev->pdev->dev,
12302 			"Get 64 bit register failed, ret = %d.\n", ret);
12303 		return;
12304 	}
12305 	reg_num = regs_num_64_bit * 2;
12306 	reg += reg_num;
12307 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12308 	for (i = 0; i < separator_num; i++)
12309 		*reg++ = SEPARATOR_VALUE;
12310 
12311 	ret = hclge_get_dfx_reg(hdev, reg);
12312 	if (ret)
12313 		dev_err(&hdev->pdev->dev,
12314 			"Get dfx register failed, ret = %d.\n", ret);
12315 }
12316 
12317 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12318 {
12319 	struct hclge_set_led_state_cmd *req;
12320 	struct hclge_desc desc;
12321 	int ret;
12322 
12323 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12324 
12325 	req = (struct hclge_set_led_state_cmd *)desc.data;
12326 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12327 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12328 
12329 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12330 	if (ret)
12331 		dev_err(&hdev->pdev->dev,
12332 			"Send set led state cmd error, ret =%d\n", ret);
12333 
12334 	return ret;
12335 }
12336 
12337 enum hclge_led_status {
12338 	HCLGE_LED_OFF,
12339 	HCLGE_LED_ON,
12340 	HCLGE_LED_NO_CHANGE = 0xFF,
12341 };
12342 
12343 static int hclge_set_led_id(struct hnae3_handle *handle,
12344 			    enum ethtool_phys_id_state status)
12345 {
12346 	struct hclge_vport *vport = hclge_get_vport(handle);
12347 	struct hclge_dev *hdev = vport->back;
12348 
12349 	switch (status) {
12350 	case ETHTOOL_ID_ACTIVE:
12351 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
12352 	case ETHTOOL_ID_INACTIVE:
12353 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12354 	default:
12355 		return -EINVAL;
12356 	}
12357 }
12358 
12359 static void hclge_get_link_mode(struct hnae3_handle *handle,
12360 				unsigned long *supported,
12361 				unsigned long *advertising)
12362 {
12363 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12364 	struct hclge_vport *vport = hclge_get_vport(handle);
12365 	struct hclge_dev *hdev = vport->back;
12366 	unsigned int idx = 0;
12367 
12368 	for (; idx < size; idx++) {
12369 		supported[idx] = hdev->hw.mac.supported[idx];
12370 		advertising[idx] = hdev->hw.mac.advertising[idx];
12371 	}
12372 }
12373 
12374 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12375 {
12376 	struct hclge_vport *vport = hclge_get_vport(handle);
12377 	struct hclge_dev *hdev = vport->back;
12378 
12379 	return hclge_config_gro(hdev, enable);
12380 }
12381 
12382 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12383 {
12384 	struct hclge_vport *vport = &hdev->vport[0];
12385 	struct hnae3_handle *handle = &vport->nic;
12386 	u8 tmp_flags;
12387 	int ret;
12388 
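	/* Pick up any change in the overflow promisc flags and mark the
	 * promisc configuration as needing to be re-applied.
	 */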
12389 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12390 		set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
12391 		vport->last_promisc_flags = vport->overflow_promisc_flags;
12392 	}
12393 
12394 	if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
12395 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12396 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12397 					     tmp_flags & HNAE3_MPE);
12398 		if (!ret) {
12399 			clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
12400 			hclge_enable_vlan_filter(handle,
12401 						 tmp_flags & HNAE3_VLAN_FLTR);
12402 		}
12403 	}
12404 }
12405 
12406 static bool hclge_module_existed(struct hclge_dev *hdev)
12407 {
12408 	struct hclge_desc desc;
12409 	u32 existed;
12410 	int ret;
12411 
12412 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12413 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12414 	if (ret) {
12415 		dev_err(&hdev->pdev->dev,
12416 			"failed to get SFP exist state, ret = %d\n", ret);
12417 		return false;
12418 	}
12419 
12420 	existed = le32_to_cpu(desc.data[0]);
12421 
12422 	return existed != 0;
12423 }
12424 
12425 /* One read needs 6 BDs (140 bytes in total).
12426  * Returns the number of bytes actually read; 0 means the read failed.
12427  */
12428 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12429 				     u32 len, u8 *data)
12430 {
12431 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12432 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12433 	u16 read_len;
12434 	u16 copy_len;
12435 	int ret;
12436 	int i;
12437 
12438 	/* setup all 6 bds to read module eeprom info. */
12439 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12440 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12441 					   true);
12442 
12443 		/* bd0~bd4 need next flag */
12444 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12445 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12446 	}
12447 
12448 	/* setup bd0, this bd contains offset and read length. */
12449 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12450 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12451 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12452 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
12453 
12454 	ret = hclge_cmd_send(&hdev->hw, desc, i);
12455 	if (ret) {
12456 		dev_err(&hdev->pdev->dev,
12457 			"failed to get SFP eeprom info, ret = %d\n", ret);
12458 		return 0;
12459 	}
12460 
12461 	/* copy sfp info from bd0 to out buffer. */
12462 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12463 	memcpy(data, sfp_info_bd0->data, copy_len);
12464 	read_len = copy_len;
12465 
12466 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
12467 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12468 		if (read_len >= len)
12469 			return read_len;
12470 
12471 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12472 		memcpy(data + read_len, desc[i].data, copy_len);
12473 		read_len += copy_len;
12474 	}
12475 
12476 	return read_len;
12477 }
12478 
12479 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12480 				   u32 len, u8 *data)
12481 {
12482 	struct hclge_vport *vport = hclge_get_vport(handle);
12483 	struct hclge_dev *hdev = vport->back;
12484 	u32 read_len = 0;
12485 	u16 data_len;
12486 
12487 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12488 		return -EOPNOTSUPP;
12489 
12490 	if (!hclge_module_existed(hdev))
12491 		return -ENXIO;
12492 
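	/* Read the module EEPROM in chunks; each firmware command returns at
	 * most HCLGE_SFP_INFO_MAX_LEN bytes.
	 */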
12493 	while (read_len < len) {
12494 		data_len = hclge_get_sfp_eeprom_info(hdev,
12495 						     offset + read_len,
12496 						     len - read_len,
12497 						     data + read_len);
12498 		if (!data_len)
12499 			return -EIO;
12500 
12501 		read_len += data_len;
12502 	}
12503 
12504 	return 0;
12505 }
12506 
12507 static const struct hnae3_ae_ops hclge_ops = {
12508 	.init_ae_dev = hclge_init_ae_dev,
12509 	.uninit_ae_dev = hclge_uninit_ae_dev,
12510 	.reset_prepare = hclge_reset_prepare_general,
12511 	.reset_done = hclge_reset_done,
12512 	.init_client_instance = hclge_init_client_instance,
12513 	.uninit_client_instance = hclge_uninit_client_instance,
12514 	.map_ring_to_vector = hclge_map_ring_to_vector,
12515 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12516 	.get_vector = hclge_get_vector,
12517 	.put_vector = hclge_put_vector,
12518 	.set_promisc_mode = hclge_set_promisc_mode,
12519 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
12520 	.set_loopback = hclge_set_loopback,
12521 	.start = hclge_ae_start,
12522 	.stop = hclge_ae_stop,
12523 	.client_start = hclge_client_start,
12524 	.client_stop = hclge_client_stop,
12525 	.get_status = hclge_get_status,
12526 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
12527 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12528 	.get_media_type = hclge_get_media_type,
12529 	.check_port_speed = hclge_check_port_speed,
12530 	.get_fec = hclge_get_fec,
12531 	.set_fec = hclge_set_fec,
12532 	.get_rss_key_size = hclge_get_rss_key_size,
12533 	.get_rss = hclge_get_rss,
12534 	.set_rss = hclge_set_rss,
12535 	.set_rss_tuple = hclge_set_rss_tuple,
12536 	.get_rss_tuple = hclge_get_rss_tuple,
12537 	.get_tc_size = hclge_get_tc_size,
12538 	.get_mac_addr = hclge_get_mac_addr,
12539 	.set_mac_addr = hclge_set_mac_addr,
12540 	.do_ioctl = hclge_do_ioctl,
12541 	.add_uc_addr = hclge_add_uc_addr,
12542 	.rm_uc_addr = hclge_rm_uc_addr,
12543 	.add_mc_addr = hclge_add_mc_addr,
12544 	.rm_mc_addr = hclge_rm_mc_addr,
12545 	.set_autoneg = hclge_set_autoneg,
12546 	.get_autoneg = hclge_get_autoneg,
12547 	.restart_autoneg = hclge_restart_autoneg,
12548 	.halt_autoneg = hclge_halt_autoneg,
12549 	.get_pauseparam = hclge_get_pauseparam,
12550 	.set_pauseparam = hclge_set_pauseparam,
12551 	.set_mtu = hclge_set_mtu,
12552 	.reset_queue = hclge_reset_tqp,
12553 	.get_stats = hclge_get_stats,
12554 	.get_mac_stats = hclge_get_mac_stat,
12555 	.update_stats = hclge_update_stats,
12556 	.get_strings = hclge_get_strings,
12557 	.get_sset_count = hclge_get_sset_count,
12558 	.get_fw_version = hclge_get_fw_version,
12559 	.get_mdix_mode = hclge_get_mdix_mode,
12560 	.enable_vlan_filter = hclge_enable_vlan_filter,
12561 	.set_vlan_filter = hclge_set_vlan_filter,
12562 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12563 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12564 	.reset_event = hclge_reset_event,
12565 	.get_reset_level = hclge_get_reset_level,
12566 	.set_default_reset_request = hclge_set_def_reset_request,
12567 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12568 	.set_channels = hclge_set_channels,
12569 	.get_channels = hclge_get_channels,
12570 	.get_regs_len = hclge_get_regs_len,
12571 	.get_regs = hclge_get_regs,
12572 	.set_led_id = hclge_set_led_id,
12573 	.get_link_mode = hclge_get_link_mode,
12574 	.add_fd_entry = hclge_add_fd_entry,
12575 	.del_fd_entry = hclge_del_fd_entry,
12576 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12577 	.get_fd_rule_info = hclge_get_fd_rule_info,
12578 	.get_fd_all_rules = hclge_get_all_rules,
12579 	.enable_fd = hclge_enable_fd,
12580 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
12581 	.dbg_run_cmd = hclge_dbg_run_cmd,
12582 	.dbg_read_cmd = hclge_dbg_read_cmd,
12583 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
12584 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
12585 	.ae_dev_resetting = hclge_ae_dev_resetting,
12586 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12587 	.set_gro_en = hclge_gro_en,
12588 	.get_global_queue_id = hclge_covert_handle_qid_global,
12589 	.set_timer_task = hclge_set_timer_task,
12590 	.mac_connect_phy = hclge_mac_connect_phy,
12591 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
12592 	.get_vf_config = hclge_get_vf_config,
12593 	.set_vf_link_state = hclge_set_vf_link_state,
12594 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
12595 	.set_vf_trust = hclge_set_vf_trust,
12596 	.set_vf_rate = hclge_set_vf_rate,
12597 	.set_vf_mac = hclge_set_vf_mac,
12598 	.get_module_eeprom = hclge_get_module_eeprom,
12599 	.get_cmdq_stat = hclge_get_cmdq_stat,
12600 	.add_cls_flower = hclge_add_cls_flower,
12601 	.del_cls_flower = hclge_del_cls_flower,
12602 	.cls_flower_active = hclge_is_cls_flower_active,
12603 	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12604 	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12605 };
12606 
12607 static struct hnae3_ae_algo ae_algo = {
12608 	.ops = &hclge_ops,
12609 	.pdev_id_table = ae_algo_pci_tbl,
12610 };
12611 
12612 static int hclge_init(void)
12613 {
12614 	pr_info("%s is initializing\n", HCLGE_NAME);
12615 
12616 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
12617 	if (!hclge_wq) {
12618 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12619 		return -ENOMEM;
12620 	}
12621 
12622 	hnae3_register_ae_algo(&ae_algo);
12623 
12624 	return 0;
12625 }
12626 
12627 static void hclge_exit(void)
12628 {
12629 	hnae3_unregister_ae_algo(&ae_algo);
12630 	destroy_workqueue(hclge_wq);
12631 }
12632 module_init(hclge_init);
12633 module_exit(hclge_exit);
12634 
12635 MODULE_LICENSE("GPL");
12636 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12637 MODULE_DESCRIPTION("HCLGE Driver");
12638 MODULE_VERSION(HCLGE_MOD_VERSION);
12639