// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (C) 2014-2017 aQuantia Corporation. */

/* File aq_filters.c: RX filters related functions. */

#include "aq_filters.h"

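/* RX rules arrive through the ethtool RXNFC interface, are kept in a
 * location-sorted list and are mirrored into three hardware filter banks:
 * ethertype (FETHERT), VLAN (FVLANID) and L3/L4 (FL3L4) locations.
 *
 * Illustrative usage from userspace (interface name and queue/location
 * values below are examples only):
 *
 *   ethtool -K eth0 ntuple on
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 2
 *   ethtool -N eth0 delete 32
 */
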
static bool __must_check
aq_rule_is_approve(struct ethtool_rx_flow_spec *fsp)
{
	if (fsp->flow_type & FLOW_MAC_EXT)
		return false;

	switch (fsp->flow_type & ~FLOW_EXT) {
	case ETHER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		return true;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_IP:
			return true;
		default:
			return false;
		}
	case IPV6_USER_FLOW:
		switch (fsp->h_u.usr_ip6_spec.l4_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_IP:
			return true;
		default:
			return false;
		}
	default:
		return false;
	}

	return false;
}

static bool __must_check
aq_match_filter(struct ethtool_rx_flow_spec *fsp1,
		struct ethtool_rx_flow_spec *fsp2)
{
	if (fsp1->flow_type != fsp2->flow_type ||
	    memcmp(&fsp1->h_u, &fsp2->h_u, sizeof(fsp2->h_u)) ||
	    memcmp(&fsp1->h_ext, &fsp2->h_ext, sizeof(fsp2->h_ext)) ||
	    memcmp(&fsp1->m_u, &fsp2->m_u, sizeof(fsp2->m_u)) ||
	    memcmp(&fsp1->m_ext, &fsp2->m_ext, sizeof(fsp2->m_ext)))
		return false;

	return true;
}

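/* Check whether an identical rule is already installed at a different
 * location; a rule at the same location is replaced rather than duplicated.
 */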
static bool __must_check
aq_rule_already_exists(struct aq_nic_s *aq_nic,
		       struct ethtool_rx_flow_spec *fsp)
{
	struct aq_rx_filter *rule;
	struct hlist_node *aq_node2;
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if (rule->aq_fsp.location == fsp->location)
			continue;
		if (aq_match_filter(&rule->aq_fsp, fsp)) {
			netdev_err(aq_nic->ndev,
				   "ethtool: This filter is already set\n");
			return true;
		}
	}

	return false;
}

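/* L3/L4 rules occupy locations [AQ_RX_FIRST_LOC_FL3L4, AQ_RX_LAST_LOC_FL3L4].
 * An IPv6 rule consumes four consecutive slots, so it may start only at the
 * first slot of a group of four, and IPv4 and IPv6 rules cannot be mixed.
 */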
static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic,
				  struct aq_hw_rx_fltrs_s *rx_fltrs,
				  struct ethtool_rx_flow_spec *fsp)
{
	if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 ||
	    fsp->location > AQ_RX_LAST_LOC_FL3L4) {
		netdev_err(aq_nic->ndev,
			   "ethtool: location must be in range [%d, %d]",
			   AQ_RX_FIRST_LOC_FL3L4,
			   AQ_RX_LAST_LOC_FL3L4);
		return -EINVAL;
	}
	if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) {
		rx_fltrs->fl3l4.is_ipv6 = false;
		netdev_err(aq_nic->ndev,
			   "ethtool: mixing ipv4 and ipv6 is not allowed");
		return -EINVAL;
	} else if (!rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv6) {
		rx_fltrs->fl3l4.is_ipv6 = true;
		netdev_err(aq_nic->ndev,
			   "ethtool: mixing ipv4 and ipv6 is not allowed");
		return -EINVAL;
	} else if (rx_fltrs->fl3l4.is_ipv6		      &&
		   fsp->location != AQ_RX_FIRST_LOC_FL3L4 + 4 &&
		   fsp->location != AQ_RX_FIRST_LOC_FL3L4) {
		netdev_err(aq_nic->ndev,
			   "ethtool: The specified location for ipv6 must be %d or %d",
			   AQ_RX_FIRST_LOC_FL3L4, AQ_RX_FIRST_LOC_FL3L4 + 4);
		return -EINVAL;
	}

	return 0;
}

static int __must_check
aq_check_approve_fl2(struct aq_nic_s *aq_nic,
		     struct aq_hw_rx_fltrs_s *rx_fltrs,
		     struct ethtool_rx_flow_spec *fsp)
{
	if (fsp->location < AQ_RX_FIRST_LOC_FETHERT ||
	    fsp->location > AQ_RX_LAST_LOC_FETHERT) {
		netdev_err(aq_nic->ndev,
			   "ethtool: location must be in range [%d, %d]",
			   AQ_RX_FIRST_LOC_FETHERT,
			   AQ_RX_LAST_LOC_FETHERT);
		return -EINVAL;
	}

	if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK &&
	    fsp->m_u.ether_spec.h_proto == 0U) {
		netdev_err(aq_nic->ndev,
			   "ethtool: proto (ether_type) parameter must be specified");
		return -EINVAL;
	}

	return 0;
}

static int __must_check
aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
		       struct aq_hw_rx_fltrs_s *rx_fltrs,
		       struct ethtool_rx_flow_spec *fsp)
{
	if (fsp->location < AQ_RX_FIRST_LOC_FVLANID ||
	    fsp->location > AQ_RX_LAST_LOC_FVLANID) {
		netdev_err(aq_nic->ndev,
			   "ethtool: location must be in range [%d, %d]",
			   AQ_RX_FIRST_LOC_FVLANID,
			   AQ_RX_LAST_LOC_FVLANID);
		return -EINVAL;
	}

	if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci),
		       aq_nic->active_vlans))) {
		netdev_err(aq_nic->ndev,
			   "ethtool: unknown vlan-id specified");
		return -EINVAL;
	}

	if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
		netdev_err(aq_nic->ndev,
			   "ethtool: queue number must be in range [0, %d]",
			   aq_nic->aq_nic_cfg.num_rss_queues - 1);
		return -EINVAL;
	}
	return 0;
}

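/* Dispatch rule validation by class: FLOW_EXT rules are either VLAN-id
 * filters (VID mask set) or user-priority/ethertype filters (PCP mask set);
 * everything else is validated as an L2 or L3/L4 filter.
 */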
static int __must_check
aq_check_filter(struct aq_nic_s *aq_nic,
		struct ethtool_rx_flow_spec *fsp)
{
	int err = 0;
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);

	if (fsp->flow_type & FLOW_EXT) {
		if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_VID_MASK) {
			err = aq_check_approve_fvlan(aq_nic, rx_fltrs, fsp);
		} else if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK) {
			err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
		} else {
			netdev_err(aq_nic->ndev,
				   "ethtool: invalid vlan mask 0x%x specified",
				   be16_to_cpu(fsp->m_ext.vlan_tci));
			err = -EINVAL;
		}
	} else {
		switch (fsp->flow_type & ~FLOW_EXT) {
		case ETHER_FLOW:
			err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
		case IPV4_FLOW:
		case IP_USER_FLOW:
			rx_fltrs->fl3l4.is_ipv6 = false;
			err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
			break;
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
		case SCTP_V6_FLOW:
		case IPV6_FLOW:
		case IPV6_USER_FLOW:
			rx_fltrs->fl3l4.is_ipv6 = true;
			err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
			break;
		default:
			netdev_err(aq_nic->ndev,
				   "ethtool: unknown flow-type specified");
			err = -EINVAL;
		}
	}

	return err;
}

static bool __must_check
aq_rule_is_not_support(struct aq_nic_s *aq_nic,
		       struct ethtool_rx_flow_spec *fsp)
{
	bool rule_is_not_support = false;

	if (!(aq_nic->ndev->features & NETIF_F_NTUPLE)) {
		netdev_err(aq_nic->ndev,
			   "ethtool: Please enable ntuple filtering first:\n"
			   "ethtool -K %s ntuple on\n", aq_nic->ndev->name);
		rule_is_not_support = true;
	} else if (!aq_rule_is_approve(fsp)) {
		netdev_err(aq_nic->ndev,
			   "ethtool: The specified flow type is not supported\n");
		rule_is_not_support = true;
	} else if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW &&
		   (fsp->h_u.tcp_ip4_spec.tos ||
		    fsp->h_u.tcp_ip6_spec.tclass)) {
		netdev_err(aq_nic->ndev,
			   "ethtool: The specified tos/tclass fields are not supported\n");
		rule_is_not_support = true;
	} else if (fsp->flow_type & FLOW_MAC_EXT) {
		netdev_err(aq_nic->ndev,
			   "ethtool: MAC_EXT is not supported");
		rule_is_not_support = true;
	}

	return rule_is_not_support;
}

static bool __must_check
aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
		       struct ethtool_rx_flow_spec *fsp)
{
	bool rule_is_not_correct = false;

	if (!aq_nic) {
		rule_is_not_correct = true;
	} else if (fsp->location > AQ_RX_MAX_RXNFC_LOC) {
		netdev_err(aq_nic->ndev,
			   "ethtool: The specified rule location %u is invalid\n",
			   fsp->location);
		rule_is_not_correct = true;
	} else if (aq_check_filter(aq_nic, fsp)) {
		rule_is_not_correct = true;
	} else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
		if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
			netdev_err(aq_nic->ndev,
				   "ethtool: The specified action is invalid.\n"
				   "Maximum allowable action value is %u.\n",
				   aq_nic->aq_nic_cfg.num_rss_queues - 1);
			rule_is_not_correct = true;
		}
	}

	return rule_is_not_correct;
}

static int __must_check
aq_check_rule(struct aq_nic_s *aq_nic,
	      struct ethtool_rx_flow_spec *fsp)
{
	int err = 0;

	if (aq_rule_is_not_correct(aq_nic, fsp))
		err = -EINVAL;
	else if (aq_rule_is_not_support(aq_nic, fsp))
		err = -EOPNOTSUPP;
	else if (aq_rule_already_exists(aq_nic, fsp))
		err = -EEXIST;

	return err;
}

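/* Translate an ethertype rule into the hardware L2 filter layout: a queue
 * of -1 means discard, and the user-priority match is enabled only when the
 * VLAN mask selects the PCP bits.
 */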
static void aq_set_data_fl2(struct aq_nic_s *aq_nic,
			    struct aq_rx_filter *aq_rx_fltr,
			    struct aq_rx_filter_l2 *data, bool add)
{
	const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;

	memset(data, 0, sizeof(*data));

	data->location = fsp->location - AQ_RX_FIRST_LOC_FETHERT;

	if (fsp->ring_cookie != RX_CLS_FLOW_DISC)
		data->queue = fsp->ring_cookie;
	else
		data->queue = -1;

	data->ethertype = be16_to_cpu(fsp->h_u.ether_spec.h_proto);
	data->user_priority_en = be16_to_cpu(fsp->m_ext.vlan_tci)
				 == VLAN_PRIO_MASK;
	data->user_priority = (be16_to_cpu(fsp->h_ext.vlan_tci)
			       & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}

static int aq_add_del_fether(struct aq_nic_s *aq_nic,
			     struct aq_rx_filter *aq_rx_fltr, bool add)
{
	struct aq_rx_filter_l2 data;
	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;

	aq_set_data_fl2(aq_nic, aq_rx_fltr, &data, add);

	if (unlikely(!aq_hw_ops->hw_filter_l2_set))
		return -EOPNOTSUPP;
	if (unlikely(!aq_hw_ops->hw_filter_l2_clear))
		return -EOPNOTSUPP;

	if (add)
		return aq_hw_ops->hw_filter_l2_set(aq_hw, &data);
	else
		return aq_hw_ops->hw_filter_l2_clear(aq_hw, &data);
}

static bool aq_fvlan_is_busy(struct aq_rx_filter_vlan *aq_vlans, int vlan)
{
	int i;

	for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
		if (aq_vlans[i].enable &&
		    aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED &&
		    aq_vlans[i].vlan_id == vlan) {
			return true;
		}
	}

	return false;
}

/* Rebuild the array of VLAN filters so that filters with an assigned queue
 * take precedence over plain VLANs configured on the interface.
 */
static void aq_fvlan_rebuild(struct aq_nic_s *aq_nic,
			     unsigned long *active_vlans,
			     struct aq_rx_filter_vlan *aq_vlans)
{
	bool vlan_busy = false;
	int vlan = -1;
	int i;

	for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
		if (aq_vlans[i].enable &&
		    aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED)
			continue;
		do {
			vlan = find_next_bit(active_vlans,
					     VLAN_N_VID,
					     vlan + 1);
			if (vlan == VLAN_N_VID) {
				aq_vlans[i].enable = 0U;
				aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
				aq_vlans[i].vlan_id = 0;
				continue;
			}

			vlan_busy = aq_fvlan_is_busy(aq_vlans, vlan);
			if (!vlan_busy) {
				aq_vlans[i].enable = 1U;
				aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
				aq_vlans[i].vlan_id = vlan;
			}
		} while (vlan_busy && vlan != VLAN_N_VID);
	}
}

static int aq_set_data_fvlan(struct aq_nic_s *aq_nic,
			     struct aq_rx_filter *aq_rx_fltr,
			     struct aq_rx_filter_vlan *aq_vlans, bool add)
{
	const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
	int location = fsp->location - AQ_RX_FIRST_LOC_FVLANID;
	int i;

	memset(&aq_vlans[location], 0, sizeof(aq_vlans[location]));

	if (!add)
		return 0;

	/* Disable any existing entry for this vlan id; the rebuild pass
	 * re-adds it without a queue assignment if the vlan is still active.
	 */
	for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
		if (aq_vlans[i].vlan_id ==
		   (be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK)) {
			aq_vlans[i].enable = false;
		}
	}

	aq_vlans[location].location = location;
	aq_vlans[location].vlan_id = be16_to_cpu(fsp->h_ext.vlan_tci)
				     & VLAN_VID_MASK;
	aq_vlans[location].queue = fsp->ring_cookie & 0x1FU;
	aq_vlans[location].enable = 1U;

	return 0;
}

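/* Delete the ethtool rule whose VLAN tci matches @vlan_id, if one exists. */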
int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct aq_rx_filter *rule = NULL;
	struct hlist_node *aq_node2;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
			break;
	}
	if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
		struct ethtool_rxnfc cmd;

		cmd.fs.location = rule->aq_fsp.location;
		return aq_del_rxnfc_rule(aq_nic, &cmd);
	}

	return -ENOENT;
}

static int aq_add_del_fvlan(struct aq_nic_s *aq_nic,
			    struct aq_rx_filter *aq_rx_fltr, bool add)
{
	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;

	if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
		return -EOPNOTSUPP;

	aq_set_data_fvlan(aq_nic,
			  aq_rx_fltr,
			  aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans,
			  add);

	return aq_filters_vlans_update(aq_nic);
}

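/* Build the hardware L3/L4 command words for a rule.  active_ipv4 keeps one
 * bit per filter slot, while an IPv6 filter spans four slots, so active_ipv6
 * is tracked per group of four (location / 4).
 */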
static int aq_set_data_fl3l4(struct aq_nic_s *aq_nic,
			     struct aq_rx_filter *aq_rx_fltr,
			     struct aq_rx_filter_l3l4 *data, bool add)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;

	memset(data, 0, sizeof(*data));

	data->is_ipv6 = rx_fltrs->fl3l4.is_ipv6;
	data->location = HW_ATL_GET_REG_LOCATION_FL3L4(fsp->location);

	if (!add) {
		if (!data->is_ipv6)
			rx_fltrs->fl3l4.active_ipv4 &= ~BIT(data->location);
		else
			rx_fltrs->fl3l4.active_ipv6 &=
				~BIT((data->location) / 4);

		return 0;
	}

	data->cmd |= HW_ATL_RX_ENABLE_FLTR_L3L4;

	switch (fsp->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		data->cmd |= HW_ATL_RX_UDP;
		data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		data->cmd |= HW_ATL_RX_SCTP;
		data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
		break;
	default:
		break;
	}

	if (!data->is_ipv6) {
		data->ip_src[0] =
			ntohl(fsp->h_u.tcp_ip4_spec.ip4src);
		data->ip_dst[0] =
			ntohl(fsp->h_u.tcp_ip4_spec.ip4dst);
		rx_fltrs->fl3l4.active_ipv4 |= BIT(data->location);
	} else {
		int i;

		rx_fltrs->fl3l4.active_ipv6 |= BIT((data->location) / 4);
		for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
			data->ip_dst[i] =
				ntohl(fsp->h_u.tcp_ip6_spec.ip6dst[i]);
			data->ip_src[i] =
				ntohl(fsp->h_u.tcp_ip6_spec.ip6src[i]);
		}
		data->cmd |= HW_ATL_RX_ENABLE_L3_IPV6;
	}
	if (fsp->flow_type != IP_USER_FLOW &&
	    fsp->flow_type != IPV6_USER_FLOW) {
		if (!data->is_ipv6) {
			data->p_dst =
				ntohs(fsp->h_u.tcp_ip4_spec.pdst);
			data->p_src =
				ntohs(fsp->h_u.tcp_ip4_spec.psrc);
		} else {
			data->p_dst =
				ntohs(fsp->h_u.tcp_ip6_spec.pdst);
			data->p_src =
				ntohs(fsp->h_u.tcp_ip6_spec.psrc);
		}
	}
	if (data->ip_src[0] && !data->is_ipv6)
		data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3;
	if (data->ip_dst[0] && !data->is_ipv6)
		data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3;
	if (data->p_dst)
		data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4;
	if (data->p_src)
		data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4;
	if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
		data->cmd |= HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT;
		data->cmd |= fsp->ring_cookie << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
		data->cmd |= HW_ATL_RX_ENABLE_QUEUE_L3L4;
	} else {
		data->cmd |= HW_ATL_RX_DISCARD << HW_ATL_RX_ACTION_FL3F4_SHIFT;
	}

	return 0;
}

static int aq_set_fl3l4(struct aq_hw_s *aq_hw,
			const struct aq_hw_ops *aq_hw_ops,
			struct aq_rx_filter_l3l4 *data)
{
	if (unlikely(!aq_hw_ops->hw_filter_l3l4_set))
		return -EOPNOTSUPP;

	return aq_hw_ops->hw_filter_l3l4_set(aq_hw, data);
}

static int aq_add_del_fl3l4(struct aq_nic_s *aq_nic,
			    struct aq_rx_filter *aq_rx_fltr, bool add)
{
	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
	struct aq_rx_filter_l3l4 data;

	if (unlikely(aq_rx_fltr->aq_fsp.location < AQ_RX_FIRST_LOC_FL3L4 ||
		     aq_rx_fltr->aq_fsp.location > AQ_RX_LAST_LOC_FL3L4  ||
		     aq_set_data_fl3l4(aq_nic, aq_rx_fltr, &data, add)))
		return -EINVAL;

	return aq_set_fl3l4(aq_hw, aq_hw_ops, &data);
}

static int aq_add_del_rule(struct aq_nic_s *aq_nic,
			   struct aq_rx_filter *aq_rx_fltr, bool add)
{
	int err = -EINVAL;

	if (aq_rx_fltr->aq_fsp.flow_type & FLOW_EXT) {
		if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
		    == VLAN_VID_MASK) {
			aq_rx_fltr->type = aq_rx_filter_vlan;
			err = aq_add_del_fvlan(aq_nic, aq_rx_fltr, add);
		} else if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
			== VLAN_PRIO_MASK) {
			aq_rx_fltr->type = aq_rx_filter_ethertype;
			err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
		}
	} else {
		switch (aq_rx_fltr->aq_fsp.flow_type & ~FLOW_EXT) {
		case ETHER_FLOW:
			aq_rx_fltr->type = aq_rx_filter_ethertype;
			err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
		case IP_USER_FLOW:
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
		case SCTP_V6_FLOW:
		case IPV6_USER_FLOW:
			aq_rx_fltr->type = aq_rx_filter_l3l4;
			err = aq_add_del_fl3l4(aq_nic, aq_rx_fltr, add);
			break;
		default:
			err = -EINVAL;
			break;
		}
	}

	return err;
}

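/* Remove any rule already installed at @index and, when @aq_rx_fltr is
 * non-NULL, link the new rule into the location-sorted filter list.
 */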
static int aq_update_table_filters(struct aq_nic_s *aq_nic,
				   struct aq_rx_filter *aq_rx_fltr, u16 index,
				   struct ethtool_rxnfc *cmd)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct aq_rx_filter *rule = NULL, *parent = NULL;
	struct hlist_node *aq_node2;
	int err = -EINVAL;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if (rule->aq_fsp.location >= index)
			break;
		parent = rule;
	}

	if (rule && rule->aq_fsp.location == index) {
		err = aq_add_del_rule(aq_nic, rule, false);
		hlist_del(&rule->aq_node);
		kfree(rule);
		--rx_fltrs->active_filters;
	}

	if (unlikely(!aq_rx_fltr))
		return err;

	INIT_HLIST_NODE(&aq_rx_fltr->aq_node);

	if (parent)
		hlist_add_behind(&aq_rx_fltr->aq_node, &parent->aq_node);
	else
		hlist_add_head(&aq_rx_fltr->aq_node, &rx_fltrs->filter_list);

	++rx_fltrs->active_filters;

	return 0;
}

u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);

	return rx_fltrs->active_filters;
}

struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic)
{
	return &aq_nic->aq_hw_rx_fltrs;
}

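/* Validate an ethtool RXNFC add request, then install the rule both in the
 * driver's filter list and in hardware; on hardware failure the list entry
 * is rolled back.
 */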
int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct aq_rx_filter *aq_rx_fltr;
	int err = 0;

	err = aq_check_rule(aq_nic, fsp);
	if (err)
		goto err_exit;

	aq_rx_fltr = kzalloc(sizeof(*aq_rx_fltr), GFP_KERNEL);
	if (unlikely(!aq_rx_fltr)) {
		err = -ENOMEM;
		goto err_exit;
	}

	memcpy(&aq_rx_fltr->aq_fsp, fsp, sizeof(*fsp));

	err = aq_update_table_filters(aq_nic, aq_rx_fltr, fsp->location, NULL);
	if (unlikely(err))
		goto err_free;

	err = aq_add_del_rule(aq_nic, aq_rx_fltr, true);
	if (unlikely(err)) {
		hlist_del(&aq_rx_fltr->aq_node);
		--rx_fltrs->active_filters;
		goto err_free;
	}

	return 0;

err_free:
	kfree(aq_rx_fltr);
err_exit:
	return err;
}

int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct aq_rx_filter *rule = NULL;
	struct hlist_node *aq_node2;
	int err = -EINVAL;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if (rule->aq_fsp.location == cmd->fs.location)
			break;
	}

	if (rule && rule->aq_fsp.location == cmd->fs.location) {
		err = aq_add_del_rule(aq_nic, rule, false);
		hlist_del(&rule->aq_node);
		kfree(rule);
		--rx_fltrs->active_filters;
	}
	return err;
}

int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct ethtool_rx_flow_spec *fsp =
			(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct aq_rx_filter *rule = NULL;
	struct hlist_node *aq_node2;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node)
		if (fsp->location <= rule->aq_fsp.location)
			break;

	if (unlikely(!rule || fsp->location != rule->aq_fsp.location))
		return -EINVAL;

	memcpy(fsp, &rule->aq_fsp, sizeof(*fsp));

	return 0;
}

int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct hlist_node *aq_node2;
	struct aq_rx_filter *rule;
	int count = 0;

	cmd->data = aq_get_rxnfc_count_all_rules(aq_nic);

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if (unlikely(count == cmd->rule_cnt))
			return -EMSGSIZE;

		rule_locs[count++] = rule->aq_fsp.location;
	}

	cmd->rule_cnt = count;

	return 0;
}

int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct hlist_node *aq_node2;
	struct aq_rx_filter *rule;
	int err = 0;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		err = aq_add_del_rule(aq_nic, rule, false);
		if (err)
			goto err_exit;
		hlist_del(&rule->aq_node);
		kfree(rule);
		--rx_fltrs->active_filters;
	}

err_exit:
	return err;
}

int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct hlist_node *aq_node2;
	struct aq_rx_filter *rule;
	int err = 0;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		err = aq_add_del_rule(aq_nic, rule, true);
		if (err)
			goto err_exit;
	}

err_exit:
	return err;
}

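/* Push the rebuilt VLAN table to hardware.  Filtering is disabled for the
 * duration of the update and re-enabled only if the active VLANs fit the
 * hardware table; otherwise the NIC stays VLAN-promiscuous.
 */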
int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
{
	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
	int hweight = 0;
	int err = 0;
	int i;

	if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
		return -EOPNOTSUPP;
	if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
		return -EOPNOTSUPP;

	aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
			 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);

	if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
		for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
			hweight += hweight_long(aq_nic->active_vlans[i]);

		err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
		if (err)
			return err;
	}

	err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
					    aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
	if (err)
		return err;

	if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (hweight < AQ_VLAN_MAX_FILTERS)
			err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, true);
		/* otherwise stay in promiscuous mode */
	}

	return err;
}

int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
{
	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
	int err = 0;

	memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
	aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
			 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);

	if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
		return -EOPNOTSUPP;
	if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
		return -EOPNOTSUPP;

	err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
	if (err)
		return err;
	err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
					    aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
	return err;
}