1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2021-2021 Hisilicon Limited.
3 #include <linux/skbuff.h>
4 
5 #include "hnae3.h"
6 #include "hclge_comm_cmd.h"
7 #include "hclge_comm_rss.h"
8 
/* Default RSS hash key shared by PF and VF init paths. It is copied into
 * each device's rss_cfg with a HCLGE_COMM_RSS_KEY_SIZE-byte memcpy in
 * hclge_comm_rss_init_cfg(), so the array must hold at least that many
 * bytes.
 */
static const u8 hclge_comm_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
16 
17 static void
18 hclge_comm_init_rss_tuple(struct hnae3_ae_dev *ae_dev,
19 			  struct hclge_comm_rss_tuple_cfg *rss_tuple_cfg)
20 {
21 	rss_tuple_cfg->ipv4_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
22 	rss_tuple_cfg->ipv4_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
23 	rss_tuple_cfg->ipv4_sctp_en = HCLGE_COMM_RSS_INPUT_TUPLE_SCTP;
24 	rss_tuple_cfg->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
25 	rss_tuple_cfg->ipv6_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
26 	rss_tuple_cfg->ipv6_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
27 	rss_tuple_cfg->ipv6_sctp_en =
28 		ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
29 		HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT :
30 		HCLGE_COMM_RSS_INPUT_TUPLE_SCTP;
31 	rss_tuple_cfg->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
32 }
33 
34 int hclge_comm_rss_init_cfg(struct hnae3_handle *nic,
35 			    struct hnae3_ae_dev *ae_dev,
36 			    struct hclge_comm_rss_cfg *rss_cfg)
37 {
38 	u16 rss_ind_tbl_size = ae_dev->dev_specs.rss_ind_tbl_size;
39 	int rss_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ;
40 	u16 *rss_ind_tbl;
41 
42 	if (nic->flags & HNAE3_SUPPORT_VF)
43 		rss_cfg->rss_size = nic->kinfo.rss_size;
44 
45 	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
46 		rss_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE;
47 
48 	hclge_comm_init_rss_tuple(ae_dev, &rss_cfg->rss_tuple_sets);
49 
50 	rss_cfg->rss_algo = rss_algo;
51 
52 	rss_ind_tbl = devm_kcalloc(&ae_dev->pdev->dev, rss_ind_tbl_size,
53 				   sizeof(*rss_ind_tbl), GFP_KERNEL);
54 	if (!rss_ind_tbl)
55 		return -ENOMEM;
56 
57 	rss_cfg->rss_indirection_tbl = rss_ind_tbl;
58 	memcpy(rss_cfg->rss_hash_key, hclge_comm_hash_key,
59 	       HCLGE_COMM_RSS_KEY_SIZE);
60 
61 	hclge_comm_rss_indir_init_cfg(ae_dev, rss_cfg);
62 
63 	return 0;
64 }
65 
66 void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
67 				u16 *tc_valid, u16 *tc_size)
68 {
69 	u16 roundup_size;
70 	u32 i;
71 
72 	roundup_size = roundup_pow_of_two(rss_size);
73 	roundup_size = ilog2(roundup_size);
74 
75 	for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) {
76 		tc_valid[i] = 1;
77 		tc_size[i] = roundup_size;
78 		tc_offset[i] = (hw_tc_map & BIT(i)) ? rss_size * i : 0;
79 	}
80 }
81 
82 int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
83 			       u16 *tc_valid, u16 *tc_size)
84 {
85 	struct hclge_comm_rss_tc_mode_cmd *req;
86 	struct hclge_desc desc;
87 	unsigned int i;
88 	int ret;
89 
90 	req = (struct hclge_comm_rss_tc_mode_cmd *)desc.data;
91 
92 	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
93 	for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) {
94 		u16 mode = 0;
95 
96 		hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_VALID_B,
97 			      (tc_valid[i] & 0x1));
98 		hnae3_set_field(mode, HCLGE_COMM_RSS_TC_SIZE_M,
99 				HCLGE_COMM_RSS_TC_SIZE_S, tc_size[i]);
100 		hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_SIZE_MSB_B,
101 			      tc_size[i] >> HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET &
102 			      0x1);
103 		hnae3_set_field(mode, HCLGE_COMM_RSS_TC_OFFSET_M,
104 				HCLGE_COMM_RSS_TC_OFFSET_S, tc_offset[i]);
105 
106 		req->rss_tc_mode[i] = cpu_to_le16(mode);
107 	}
108 
109 	ret = hclge_comm_cmd_send(hw, &desc, 1);
110 	if (ret)
111 		dev_err(&hw->cmq.csq.pdev->dev,
112 			"failed to set rss tc mode, ret = %d.\n", ret);
113 
114 	return ret;
115 }
116 
117 int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
118 				struct hclge_comm_hw *hw, const u8 *key,
119 				const u8 hfunc)
120 {
121 	u8 hash_algo;
122 	int ret;
123 
124 	ret = hclge_comm_parse_rss_hfunc(rss_cfg, hfunc, &hash_algo);
125 	if (ret)
126 		return ret;
127 
128 	/* Set the RSS Hash Key if specififed by the user */
129 	if (key) {
130 		ret = hclge_comm_set_rss_algo_key(hw, hash_algo, key);
131 		if (ret)
132 			return ret;
133 
134 		/* Update the shadow RSS key with user specified qids */
135 		memcpy(rss_cfg->rss_hash_key, key, HCLGE_COMM_RSS_KEY_SIZE);
136 	} else {
137 		ret = hclge_comm_set_rss_algo_key(hw, hash_algo,
138 						  rss_cfg->rss_hash_key);
139 		if (ret)
140 			return ret;
141 	}
142 	rss_cfg->rss_algo = hash_algo;
143 
144 	return 0;
145 }
146 
/**
 * hclge_comm_set_rss_tuple - set the RSS input tuple from an ethtool request
 * @ae_dev: ae device, used for version-dependent tuple checks
 * @hw: comm layer hardware handle
 * @rss_cfg: software RSS state; its tuple sets are updated on success
 * @nfc: ethtool rxnfc request carrying flow type and hash fields
 *
 * Builds the RSS_INPUT_TUPLE command from the current tuple configuration
 * plus the requested change, sends it, and only then mirrors the new
 * values back into @rss_cfg so software state matches the hardware.
 *
 * Return: 0 on success, -EINVAL for unsupported hash fields or flow type,
 * or a command-send error code.
 */
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
			     struct hclge_comm_hw *hw,
			     struct hclge_comm_rss_cfg *rss_cfg,
			     struct ethtool_rxnfc *nfc)
{
	struct hclge_comm_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	/* only IP src/dst and L4 src/dst port hashing are supported */
	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data;
	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE,
					false);

	ret = hclge_comm_init_rss_tuple_cmd(rss_cfg, nfc, ae_dev, req);
	if (ret) {
		dev_err(&hw->cmq.csq.pdev->dev,
			"failed to init rss tuple cmd, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_comm_cmd_send(hw, &desc, 1);
	if (ret) {
		dev_err(&hw->cmq.csq.pdev->dev,
			"failed to set rss tuple, ret = %d.\n", ret);
		return ret;
	}

	/* command accepted: update the shadow tuple state to match HW */
	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}
188 
/**
 * hclge_comm_get_rss_key_size - report the RSS hash key length in bytes
 * @handle: hnae3 handle, not used by this helper
 *
 * Return: HCLGE_COMM_RSS_KEY_SIZE.
 */
u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_COMM_RSS_KEY_SIZE;
}
193 
194 int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
195 			       const u8 hfunc, u8 *hash_algo)
196 {
197 	switch (hfunc) {
198 	case ETH_RSS_HASH_TOP:
199 		*hash_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ;
200 		return 0;
201 	case ETH_RSS_HASH_XOR:
202 		*hash_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE;
203 		return 0;
204 	case ETH_RSS_HASH_NO_CHANGE:
205 		*hash_algo = rss_cfg->rss_algo;
206 		return 0;
207 	default:
208 		return -EINVAL;
209 	}
210 }
211 
212 void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
213 				   struct hclge_comm_rss_cfg *rss_cfg)
214 {
215 	u16 i;
216 	/* Initialize RSS indirect table */
217 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
218 		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
219 }
220 
221 int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
222 			     u8 *tuple_sets)
223 {
224 	switch (flow_type) {
225 	case TCP_V4_FLOW:
226 		*tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
227 		break;
228 	case UDP_V4_FLOW:
229 		*tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
230 		break;
231 	case TCP_V6_FLOW:
232 		*tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
233 		break;
234 	case UDP_V6_FLOW:
235 		*tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
236 		break;
237 	case SCTP_V4_FLOW:
238 		*tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
239 		break;
240 	case SCTP_V6_FLOW:
241 		*tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
242 		break;
243 	case IPV4_FLOW:
244 	case IPV6_FLOW:
245 		*tuple_sets = HCLGE_COMM_S_IP_BIT | HCLGE_COMM_D_IP_BIT;
246 		break;
247 	default:
248 		return -EINVAL;
249 	}
250 
251 	return 0;
252 }
253 
254 static void
255 hclge_comm_append_rss_msb_info(struct hclge_comm_rss_ind_tbl_cmd *req,
256 			       u16 qid, u32 j)
257 {
258 	u8 rss_msb_oft;
259 	u8 rss_msb_val;
260 
261 	rss_msb_oft =
262 		j * HCLGE_COMM_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
263 	rss_msb_val = (qid >> HCLGE_COMM_RSS_CFG_TBL_BW_L & 0x1) <<
264 		(j * HCLGE_COMM_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
265 	req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
266 }
267 
/**
 * hclge_comm_set_rss_indir_table - program the RSS indirection table
 * @ae_dev: ae device, supplies the total indirection table size
 * @hw: comm layer hardware handle
 * @indir: table of queue ids, one per indirection entry
 *
 * The table is written in chunks of HCLGE_COMM_RSS_CFG_TBL_SIZE entries,
 * one command descriptor per chunk. Each entry's low byte goes into
 * rss_qid_l[] and its MSB is packed into rss_qid_h[] by
 * hclge_comm_append_rss_msb_info().
 *
 * Return: 0 on success, else the command-send error code.
 */
int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev,
				   struct hclge_comm_hw *hw, const u16 *indir)
{
	struct hclge_comm_rss_ind_tbl_cmd *req;
	struct hclge_desc desc;
	u16 rss_cfg_tbl_num;
	int ret;
	u16 qid;
	u16 i;
	u32 j;

	req = (struct hclge_comm_rss_ind_tbl_cmd *)desc.data;
	/* NOTE(review): assumes rss_ind_tbl_size is a multiple of
	 * HCLGE_COMM_RSS_CFG_TBL_SIZE — any remainder entries would be
	 * silently skipped; confirm against dev_specs providers.
	 */
	rss_cfg_tbl_num = ae_dev->dev_specs.rss_ind_tbl_size /
			  HCLGE_COMM_RSS_CFG_TBL_SIZE;

	for (i = 0; i < rss_cfg_tbl_num; i++) {
		hclge_comm_cmd_setup_basic_desc(&desc,
						HCLGE_OPC_RSS_INDIR_TABLE,
						false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_COMM_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap =
			cpu_to_le16(HCLGE_COMM_RSS_SET_BITMAP_MSK);
		for (j = 0; j < HCLGE_COMM_RSS_CFG_TBL_SIZE; j++) {
			qid = indir[i * HCLGE_COMM_RSS_CFG_TBL_SIZE + j];
			req->rss_qid_l[j] = qid & 0xff;
			hclge_comm_append_rss_msb_info(req, qid, j);
		}
		ret = hclge_comm_cmd_send(hw, &desc, 1);
		if (ret) {
			dev_err(&hw->cmq.csq.pdev->dev,
				"failed to configure rss table, ret = %d.\n",
				ret);
			return ret;
		}
	}
	return 0;
}
307 
308 int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic,
309 				   struct hclge_comm_hw *hw, bool is_pf,
310 				   struct hclge_comm_rss_cfg *rss_cfg)
311 {
312 	struct hclge_comm_rss_input_tuple_cmd *req;
313 	struct hclge_desc desc;
314 	int ret;
315 
316 	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE,
317 					false);
318 
319 	req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data;
320 
321 	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
322 	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
323 	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
324 	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
325 	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
326 	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
327 	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
328 	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
329 
330 	ret = hclge_comm_cmd_send(hw, &desc, 1);
331 	if (ret)
332 		dev_err(&hw->cmq.csq.pdev->dev,
333 			"failed to configure rss input, ret = %d.\n", ret);
334 	return ret;
335 }
336 
337 void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
338 				  u8 *hfunc)
339 {
340 	/* Get hash algorithm */
341 	if (hfunc) {
342 		switch (rss_cfg->rss_algo) {
343 		case HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ:
344 			*hfunc = ETH_RSS_HASH_TOP;
345 			break;
346 		case HCLGE_COMM_RSS_HASH_ALGO_SIMPLE:
347 			*hfunc = ETH_RSS_HASH_XOR;
348 			break;
349 		default:
350 			*hfunc = ETH_RSS_HASH_UNKNOWN;
351 			break;
352 		}
353 	}
354 
355 	/* Get the RSS Key required by the user */
356 	if (key)
357 		memcpy(key, rss_cfg->rss_hash_key, HCLGE_COMM_RSS_KEY_SIZE);
358 }
359 
360 void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
361 				  u32 *indir, u16 rss_ind_tbl_size)
362 {
363 	u16 i;
364 
365 	if (!indir)
366 		return;
367 
368 	for (i = 0; i < rss_ind_tbl_size; i++)
369 		indir[i] = rss_cfg->rss_indirection_tbl[i];
370 }
371 
/**
 * hclge_comm_set_rss_algo_key - program the hash algorithm and key
 * @hw: comm layer hardware handle
 * @hfunc: hardware hash algorithm id (HCLGE_COMM_RSS_HASH_ALGO_*)
 * @key: hash key, HCLGE_COMM_RSS_KEY_SIZE bytes
 *
 * The key does not fit in a single descriptor, so it is sent in chunks of
 * HCLGE_COMM_RSS_HASH_KEY_NUM bytes; each command's hash_config carries
 * the algorithm plus the chunk offset.
 *
 * Return: 0 on success, else the command-send error code.
 */
int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
				const u8 *key)
{
	struct hclge_comm_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclge_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGE_COMM_RSS_KEY_SIZE;
	req = (struct hclge_comm_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclge_comm_cmd_setup_basic_desc(&desc,
						HCLGE_OPC_RSS_GENERIC_CONFIG,
						false);

		/* NOTE(review): the |= relies on setup_basic_desc zeroing
		 * desc.data on every iteration — confirm in hclge_comm_cmd.c
		 */
		req->hash_config |= (hfunc & HCLGE_COMM_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGE_COMM_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGE_COMM_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGE_COMM_RSS_HASH_KEY_NUM,
		       key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclge_comm_cmd_send(hw, &desc, 1);
		if (ret) {
			dev_err(&hw->cmq.csq.pdev->dev,
				"failed to configure RSS key, ret = %d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}
412 
413 static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
414 {
415 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_COMM_S_PORT_BIT : 0;
416 
417 	if (nfc->data & RXH_L4_B_2_3)
418 		hash_sets |= HCLGE_COMM_D_PORT_BIT;
419 	else
420 		hash_sets &= ~HCLGE_COMM_D_PORT_BIT;
421 
422 	if (nfc->data & RXH_IP_SRC)
423 		hash_sets |= HCLGE_COMM_S_IP_BIT;
424 	else
425 		hash_sets &= ~HCLGE_COMM_S_IP_BIT;
426 
427 	if (nfc->data & RXH_IP_DST)
428 		hash_sets |= HCLGE_COMM_D_IP_BIT;
429 	else
430 		hash_sets &= ~HCLGE_COMM_D_IP_BIT;
431 
432 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
433 		hash_sets |= HCLGE_COMM_V_TAG_BIT;
434 
435 	return hash_sets;
436 }
437 
/**
 * hclge_comm_init_rss_tuple_cmd - build the RSS input-tuple command
 * @rss_cfg: software RSS state providing the current tuple selection
 * @nfc: ethtool rxnfc request (flow type + requested hash fields)
 * @ae_dev: ae device, used for the V2 IPv6-SCTP port restriction
 * @req: command payload to fill
 *
 * Starts from the current shadow configuration so unrelated flow types
 * are preserved, then overwrites only the field of the requested flow
 * type.
 *
 * Return: 0 on success, -EINVAL for an unknown flow type or when port
 * hashing is requested for IPv6 SCTP on hardware <= V2.
 */
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
				  struct ethtool_rxnfc *nfc,
				  struct hnae3_ae_dev *ae_dev,
				  struct hclge_comm_rss_input_tuple_cmd *req)
{
	u8 tuple_sets;

	/* seed the command with the current configuration */
	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_comm_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		/* hardware <= V2 cannot take IPv6-SCTP ports in the tuple */
		if (ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
490 
491 u64 hclge_comm_convert_rss_tuple(u8 tuple_sets)
492 {
493 	u64 tuple_data = 0;
494 
495 	if (tuple_sets & HCLGE_COMM_D_PORT_BIT)
496 		tuple_data |= RXH_L4_B_2_3;
497 	if (tuple_sets & HCLGE_COMM_S_PORT_BIT)
498 		tuple_data |= RXH_L4_B_0_1;
499 	if (tuple_sets & HCLGE_COMM_D_IP_BIT)
500 		tuple_data |= RXH_IP_DST;
501 	if (tuple_sets & HCLGE_COMM_S_IP_BIT)
502 		tuple_data |= RXH_IP_SRC;
503 
504 	return tuple_data;
505 }
506