1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include <linux/bitfield.h>
5 #include <linux/uaccess.h>
6 
7 /* ethtool support for iavf */
8 #include "iavf.h"
9 
10 /* ethtool statistics helpers */
11 
12 /**
13  * struct iavf_stats - definition for an ethtool statistic
14  * @stat_string: statistic name to display in ethtool -S output
15  * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
16  * @stat_offset: offsetof() the stat from a base pointer
17  *
18  * This structure defines a statistic to be added to the ethtool stats buffer.
19  * It defines a statistic as an offset from a common base pointer. Stats should
20  * be defined in constant arrays using the IAVF_STAT macro, with every element
21  * of the array using the same _type for calculating the sizeof_stat and
22  * stat_offset.
23  *
24  * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
25  * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
26  * the iavf_add_ethtool_stat() helper function.
27  *
28  * The @stat_string is interpreted as a format string, allowing formatted
29  * values to be inserted while looping over multiple structures for a given
30  * statistics array. Thus, every statistic string in an array should have the
31  * same type and number of format specifiers, to be formatted by variadic
32  * arguments to the iavf_add_stat_string() helper function.
33  **/
34 struct iavf_stats {
35 	char stat_string[ETH_GSTRING_LEN];
36 	int sizeof_stat;
37 	int stat_offset;
38 };
39 
40 /* Helper macro to define an iavf_stat structure with proper size and type.
41  * Use this when defining constant statistics arrays. Note that @_type expects
42  * only a type name and is used multiple times.
43  */
44 #define IAVF_STAT(_type, _name, _stat) { \
45 	.stat_string = _name, \
46 	.sizeof_stat = sizeof_field(_type, _stat), \
47 	.stat_offset = offsetof(_type, _stat) \
48 }
49 
50 /* Helper macro for defining some statistics related to queues */
51 #define IAVF_QUEUE_STAT(_name, _stat) \
52 	IAVF_STAT(struct iavf_ring, _name, _stat)
53 
54 /* Stats associated with a Tx or Rx ring */
55 static const struct iavf_stats iavf_gstrings_queue_stats[] = {
56 	IAVF_QUEUE_STAT("%s-%u.packets", stats.packets),
57 	IAVF_QUEUE_STAT("%s-%u.bytes", stats.bytes),
58 };
59 
60 /**
61  * iavf_add_one_ethtool_stat - copy the stat into the supplied buffer
62  * @data: location to store the stat value
63  * @pointer: basis for where to copy from
64  * @stat: the stat definition
65  *
66  * Copies the stat data defined by the pointer and stat structure pair into
67  * the memory supplied as data. Used to implement iavf_add_ethtool_stats and
68  * iavf_add_queue_stats. If the pointer is null, data will be zero'd.
69  */
70 static void
71 iavf_add_one_ethtool_stat(u64 *data, void *pointer,
72 			  const struct iavf_stats *stat)
73 {
74 	char *p;
75 
76 	if (!pointer) {
77 		/* ensure that the ethtool data buffer is zero'd for any stats
78 		 * which don't have a valid pointer.
79 		 */
80 		*data = 0;
81 		return;
82 	}
83 
84 	p = (char *)pointer + stat->stat_offset;
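	/* Statistics narrower than u64 are zero-extended into the 64-bit
	 * ethtool buffer below.
	 */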
85 	switch (stat->sizeof_stat) {
86 	case sizeof(u64):
87 		*data = *((u64 *)p);
88 		break;
89 	case sizeof(u32):
90 		*data = *((u32 *)p);
91 		break;
92 	case sizeof(u16):
93 		*data = *((u16 *)p);
94 		break;
95 	case sizeof(u8):
96 		*data = *((u8 *)p);
97 		break;
98 	default:
99 		WARN_ONCE(1, "unexpected stat size for %s",
100 			  stat->stat_string);
101 		*data = 0;
102 	}
103 }
104 
105 /**
106  * __iavf_add_ethtool_stats - copy stats into the ethtool supplied buffer
107  * @data: ethtool stats buffer
108  * @pointer: location to copy stats from
109  * @stats: array of stats to copy
110  * @size: the size of the stats definition
111  *
112  * Copy the stats defined by the stats array using the pointer as a base into
113  * the data buffer supplied by ethtool. Updates the data pointer to point to
114  * the next empty location for successive calls to __iavf_add_ethtool_stats.
115  * If pointer is null, set the data values to zero and update the pointer to
116  * skip these stats.
117  **/
118 static void
119 __iavf_add_ethtool_stats(u64 **data, void *pointer,
120 			 const struct iavf_stats stats[],
121 			 const unsigned int size)
122 {
123 	unsigned int i;
124 
125 	for (i = 0; i < size; i++)
126 		iavf_add_one_ethtool_stat((*data)++, pointer, &stats[i]);
127 }
128 
129 /**
130  * iavf_add_ethtool_stats - copy stats into ethtool supplied buffer
131  * @data: ethtool stats buffer
132  * @pointer: location where stats are stored
133  * @stats: static const array of stat definitions
134  *
135  * Macro to ease the use of __iavf_add_ethtool_stats by taking a static
136  * constant stats array and passing the ARRAY_SIZE(). This avoids typos by
137  * ensuring that we pass the size associated with the given stats array.
138  *
139  * The parameter @stats is evaluated twice, so parameters with side effects
140  * should be avoided.
141  **/
142 #define iavf_add_ethtool_stats(data, pointer, stats) \
143 	__iavf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
144 
145 /**
146  * iavf_add_queue_stats - copy queue statistics into supplied buffer
147  * @data: ethtool stats buffer
148  * @ring: the ring to copy
149  *
150  * Queue statistics must be copied while protected by
151  * u64_stats_fetch_begin, so we can't directly use iavf_add_ethtool_stats.
152  * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the
153  * ring pointer is null, zero out the queue stat values and update the data
154  * pointer. Otherwise safely copy the stats from the ring into the supplied
155  * buffer and update the data pointer when finished.
156  *
157  * This function expects to be called while under rcu_read_lock().
158  **/
159 static void
160 iavf_add_queue_stats(u64 **data, struct iavf_ring *ring)
161 {
162 	const unsigned int size = ARRAY_SIZE(iavf_gstrings_queue_stats);
163 	const struct iavf_stats *stats = iavf_gstrings_queue_stats;
164 	unsigned int start;
165 	unsigned int i;
166 
167 	/* To avoid invalid statistics values, ensure that we keep retrying
168 	 * the copy until we get a consistent value according to
169 	 * u64_stats_fetch_retry. But first, make sure our ring is
170 	 * non-null before attempting to access its syncp.
171 	 */
172 	do {
173 		start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);
174 		for (i = 0; i < size; i++)
175 			iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]);
176 	} while (ring && u64_stats_fetch_retry(&ring->syncp, start));
177 
178 	/* Once we successfully copy the stats in, update the data pointer */
179 	*data += size;
180 }
181 
182 /**
183  * __iavf_add_stat_strings - copy stat strings into ethtool buffer
184  * @p: ethtool supplied buffer
185  * @stats: stat definitions array
186  * @size: size of the stats array
187  *
188  * Format and copy the strings described by stats into the buffer pointed at
189  * by p.
190  **/
191 static void __iavf_add_stat_strings(u8 **p, const struct iavf_stats stats[],
192 				    const unsigned int size, ...)
193 {
194 	unsigned int i;
195 
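	/* Restart the va_list for each string so every stat_string is
	 * formatted with the same variadic arguments (e.g. "tx"/"rx" and the
	 * queue index).
	 */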
196 	for (i = 0; i < size; i++) {
197 		va_list args;
198 
199 		va_start(args, size);
200 		vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
201 		*p += ETH_GSTRING_LEN;
202 		va_end(args);
203 	}
204 }
205 
206 /**
207  * iavf_add_stat_strings - copy stat strings into ethtool buffer
208  * @p: ethtool supplied buffer
209  * @stats: stat definitions array
210  *
211  * Format and copy the strings described by the const static stats value into
212  * the buffer pointed at by p.
213  *
214  * The parameter @stats is evaluated twice, so parameters with side effects
215  * should be avoided. Additionally, stats must be an array such that
216  * ARRAY_SIZE can be called on it.
217  **/
218 #define iavf_add_stat_strings(p, stats, ...) \
219 	__iavf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
220 
221 #define VF_STAT(_name, _stat) \
222 	IAVF_STAT(struct iavf_adapter, _name, _stat)
223 
224 static const struct iavf_stats iavf_gstrings_stats[] = {
225 	VF_STAT("rx_bytes", current_stats.rx_bytes),
226 	VF_STAT("rx_unicast", current_stats.rx_unicast),
227 	VF_STAT("rx_multicast", current_stats.rx_multicast),
228 	VF_STAT("rx_broadcast", current_stats.rx_broadcast),
229 	VF_STAT("rx_discards", current_stats.rx_discards),
230 	VF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
231 	VF_STAT("tx_bytes", current_stats.tx_bytes),
232 	VF_STAT("tx_unicast", current_stats.tx_unicast),
233 	VF_STAT("tx_multicast", current_stats.tx_multicast),
234 	VF_STAT("tx_broadcast", current_stats.tx_broadcast),
235 	VF_STAT("tx_discards", current_stats.tx_discards),
236 	VF_STAT("tx_errors", current_stats.tx_errors),
237 };
238 
239 #define IAVF_STATS_LEN	ARRAY_SIZE(iavf_gstrings_stats)
240 
241 #define IAVF_QUEUE_STATS_LEN	ARRAY_SIZE(iavf_gstrings_queue_stats)
242 
243 /**
244  * iavf_get_link_ksettings - Get Link Speed and Duplex settings
245  * @netdev: network interface device structure
246  * @cmd: ethtool command
247  *
248  * Reports speed/duplex settings. Because this is a VF, we don't know what
249  * kind of link we really have, so we fake it.
250  **/
251 static int iavf_get_link_ksettings(struct net_device *netdev,
252 				   struct ethtool_link_ksettings *cmd)
253 {
254 	struct iavf_adapter *adapter = netdev_priv(netdev);
255 
256 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
257 	cmd->base.autoneg = AUTONEG_DISABLE;
258 	cmd->base.port = PORT_NONE;
259 	cmd->base.duplex = DUPLEX_FULL;
260 
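	/* With the advanced link speed capability, the speed is already
	 * reported in Mbps; otherwise fall back to the legacy
	 * VIRTCHNL_LINK_SPEED_* enumeration mapped below.
	 */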
261 	if (ADV_LINK_SUPPORT(adapter)) {
262 		if (adapter->link_speed_mbps &&
263 		    adapter->link_speed_mbps < U32_MAX)
264 			cmd->base.speed = adapter->link_speed_mbps;
265 		else
266 			cmd->base.speed = SPEED_UNKNOWN;
267 
268 		return 0;
269 	}
270 
271 	switch (adapter->link_speed) {
272 	case VIRTCHNL_LINK_SPEED_40GB:
273 		cmd->base.speed = SPEED_40000;
274 		break;
275 	case VIRTCHNL_LINK_SPEED_25GB:
276 		cmd->base.speed = SPEED_25000;
277 		break;
278 	case VIRTCHNL_LINK_SPEED_20GB:
279 		cmd->base.speed = SPEED_20000;
280 		break;
281 	case VIRTCHNL_LINK_SPEED_10GB:
282 		cmd->base.speed = SPEED_10000;
283 		break;
284 	case VIRTCHNL_LINK_SPEED_5GB:
285 		cmd->base.speed = SPEED_5000;
286 		break;
287 	case VIRTCHNL_LINK_SPEED_2_5GB:
288 		cmd->base.speed = SPEED_2500;
289 		break;
290 	case VIRTCHNL_LINK_SPEED_1GB:
291 		cmd->base.speed = SPEED_1000;
292 		break;
293 	case VIRTCHNL_LINK_SPEED_100MB:
294 		cmd->base.speed = SPEED_100;
295 		break;
296 	default:
297 		break;
298 	}
299 
300 	return 0;
301 }
302 
303 /**
304  * iavf_get_sset_count - Get length of string set
305  * @netdev: network interface device structure
306  * @sset: id of string set
307  *
308  * Reports size of various string tables.
309  **/
310 static int iavf_get_sset_count(struct net_device *netdev, int sset)
311 {
312 	/* Report the maximum number of queues, even if not every queue is
313 	 * currently configured. Since allocation of queues is in pairs,
314 	 * use netdev->real_num_tx_queues * 2. The real_num_tx_queues is set
315 	 * at device creation and never changes.
316 	 */
317 
318 	if (sset == ETH_SS_STATS)
319 		return IAVF_STATS_LEN +
320 			(IAVF_QUEUE_STATS_LEN * 2 *
321 			 netdev->real_num_tx_queues);
322 	else
323 		return -EINVAL;
324 }
325 
326 /**
327  * iavf_get_ethtool_stats - report device statistics
328  * @netdev: network interface device structure
329  * @stats: ethtool statistics structure
330  * @data: pointer to data buffer
331  *
332  * All statistics are added to the data buffer as an array of u64.
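 *
 * This is the callback behind "ethtool -S <ethX>" (interface name shown only
 * as an example).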
333  **/
334 static void iavf_get_ethtool_stats(struct net_device *netdev,
335 				   struct ethtool_stats *stats, u64 *data)
336 {
337 	struct iavf_adapter *adapter = netdev_priv(netdev);
338 	unsigned int i;
339 
340 	/* Explicitly request stats refresh */
341 	iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_REQUEST_STATS);
342 
343 	iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
344 
345 	rcu_read_lock();
346 	/* As num_active_queues describes both Tx and Rx queues, we can use
347 	 * it to iterate over the rings' stats.
348 	 */
349 	for (i = 0; i < adapter->num_active_queues; i++) {
350 		struct iavf_ring *ring;
351 
352 		/* Tx rings stats */
353 		ring = &adapter->tx_rings[i];
354 		iavf_add_queue_stats(&data, ring);
355 
356 		/* Rx rings stats */
357 		ring = &adapter->rx_rings[i];
358 		iavf_add_queue_stats(&data, ring);
359 	}
360 	rcu_read_unlock();
361 }
362 
363 /**
364  * iavf_get_stat_strings - Get stat strings
365  * @netdev: network interface device structure
366  * @data: buffer for string data
367  *
368  * Builds the statistics string table
369  **/
370 static void iavf_get_stat_strings(struct net_device *netdev, u8 *data)
371 {
372 	unsigned int i;
373 
374 	iavf_add_stat_strings(&data, iavf_gstrings_stats);
375 
376 	/* Queues are always allocated in pairs, so we just use
377 	 * real_num_tx_queues for both Tx and Rx queues.
378 	 */
379 	for (i = 0; i < netdev->real_num_tx_queues; i++) {
380 		iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
381 				      "tx", i);
382 		iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
383 				      "rx", i);
384 	}
385 }
386 
387 /**
388  * iavf_get_strings - Get string set
389  * @netdev: network interface device structure
390  * @sset: id of string set
391  * @data: buffer for string data
392  *
393  * Builds string tables for various string sets
394  **/
395 static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
396 {
397 	switch (sset) {
398 	case ETH_SS_STATS:
399 		iavf_get_stat_strings(netdev, data);
400 		break;
401 	default:
402 		break;
403 	}
404 }
405 
406 /**
407  * iavf_get_msglevel - Get debug message level
408  * @netdev: network interface device structure
409  *
410  * Returns current debug message level.
411  **/
412 static u32 iavf_get_msglevel(struct net_device *netdev)
413 {
414 	struct iavf_adapter *adapter = netdev_priv(netdev);
415 
416 	return adapter->msg_enable;
417 }
418 
419 /**
420  * iavf_set_msglevel - Set debug message level
421  * @netdev: network interface device structure
422  * @data: message level
423  *
424  * Set current debug message level. Higher values cause the driver to
425  * be noisier.
426  **/
427 static void iavf_set_msglevel(struct net_device *netdev, u32 data)
428 {
429 	struct iavf_adapter *adapter = netdev_priv(netdev);
430 
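	/* If a user debug flag is requested, also propagate the level to the
	 * HW-layer debug mask.
	 */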
431 	if (IAVF_DEBUG_USER & data)
432 		adapter->hw.debug_mask = data;
433 	adapter->msg_enable = data;
434 }
435 
436 /**
437  * iavf_get_drvinfo - Get driver info
438  * @netdev: network interface device structure
439  * @drvinfo: ethtool driver info structure
440  *
441  * Returns information about the driver and device for display to the user.
442  **/
443 static void iavf_get_drvinfo(struct net_device *netdev,
444 			     struct ethtool_drvinfo *drvinfo)
445 {
446 	struct iavf_adapter *adapter = netdev_priv(netdev);
447 
448 	strscpy(drvinfo->driver, iavf_driver_name, 32);
449 	strscpy(drvinfo->fw_version, "N/A", 4);
450 	strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
451 }
452 
453 /**
454  * iavf_get_ringparam - Get ring parameters
455  * @netdev: network interface device structure
456  * @ring: ethtool ringparam structure
457  * @kernel_ring: ethtool external ringparam structure
458  * @extack: netlink extended ACK report struct
459  *
460  * Returns current ring parameters. TX and RX rings are reported separately,
461  * but the number of rings is not reported.
462  **/
463 static void iavf_get_ringparam(struct net_device *netdev,
464 			       struct ethtool_ringparam *ring,
465 			       struct kernel_ethtool_ringparam *kernel_ring,
466 			       struct netlink_ext_ack *extack)
467 {
468 	struct iavf_adapter *adapter = netdev_priv(netdev);
469 
470 	ring->rx_max_pending = IAVF_MAX_RXD;
471 	ring->tx_max_pending = IAVF_MAX_TXD;
472 	ring->rx_pending = adapter->rx_desc_count;
473 	ring->tx_pending = adapter->tx_desc_count;
474 }
475 
476 /**
477  * iavf_set_ringparam - Set ring parameters
478  * @netdev: network interface device structure
479  * @ring: ethtool ringparam structure
480  * @kernel_ring: ethtool external ringparam structure
481  * @extack: netlink extended ACK report struct
482  *
483  * Sets ring parameters. TX and RX rings are controlled separately, but the
484  * number of rings is not specified, so all rings get the same settings.
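 *
 * Reached, for example, via "ethtool -G <ethX> rx 1024 tx 1024".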
485  **/
486 static int iavf_set_ringparam(struct net_device *netdev,
487 			      struct ethtool_ringparam *ring,
488 			      struct kernel_ethtool_ringparam *kernel_ring,
489 			      struct netlink_ext_ack *extack)
490 {
491 	struct iavf_adapter *adapter = netdev_priv(netdev);
492 	u32 new_rx_count, new_tx_count;
493 	int ret = 0;
494 
495 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
496 		return -EINVAL;
497 
498 	if (ring->tx_pending > IAVF_MAX_TXD ||
499 	    ring->tx_pending < IAVF_MIN_TXD ||
500 	    ring->rx_pending > IAVF_MAX_RXD ||
501 	    ring->rx_pending < IAVF_MIN_RXD) {
502 		netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
503 			   ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD,
504 			   IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE);
505 		return -EINVAL;
506 	}
507 
508 	new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
509 	if (new_tx_count != ring->tx_pending)
510 		netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
511 			    new_tx_count);
512 
513 	new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
514 	if (new_rx_count != ring->rx_pending)
515 		netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
516 			    new_rx_count);
517 
518 	/* if nothing to do return success */
519 	if ((new_tx_count == adapter->tx_desc_count) &&
520 	    (new_rx_count == adapter->rx_desc_count)) {
521 		netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
522 		return 0;
523 	}
524 
525 	if (new_tx_count != adapter->tx_desc_count) {
526 		netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n",
527 			   adapter->tx_desc_count, new_tx_count);
528 		adapter->tx_desc_count = new_tx_count;
529 	}
530 
531 	if (new_rx_count != adapter->rx_desc_count) {
532 		netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n",
533 			   adapter->rx_desc_count, new_rx_count);
534 		adapter->rx_desc_count = new_rx_count;
535 	}
536 
537 	if (netif_running(netdev)) {
538 		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
539 		ret = iavf_wait_for_reset(adapter);
540 		if (ret)
541 			netdev_warn(netdev, "Changing ring parameters: timed out or interrupted while waiting for reset\n");
542 	}
543 
544 	return ret;
545 }
546 
547 /**
548  * __iavf_get_coalesce - get per-queue coalesce settings
549  * @netdev: the netdev to check
550  * @ec: ethtool coalesce data structure
551  * @queue: which queue to pick
552  *
553  * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
554  * are per queue. If queue is <0 then we default to queue 0 as the
555  * representative value.
556  **/
557 static int __iavf_get_coalesce(struct net_device *netdev,
558 			       struct ethtool_coalesce *ec, int queue)
559 {
560 	struct iavf_adapter *adapter = netdev_priv(netdev);
561 	struct iavf_ring *rx_ring, *tx_ring;
562 
563 	/* Rx and Tx usecs are per-queue values. If the user doesn't specify
564 	 * a queue, return queue 0's values as representative.
565 	 */
566 	if (queue < 0)
567 		queue = 0;
568 	else if (queue >= adapter->num_active_queues)
569 		return -EINVAL;
570 
571 	rx_ring = &adapter->rx_rings[queue];
572 	tx_ring = &adapter->tx_rings[queue];
573 
574 	if (ITR_IS_DYNAMIC(rx_ring->itr_setting))
575 		ec->use_adaptive_rx_coalesce = 1;
576 
577 	if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
578 		ec->use_adaptive_tx_coalesce = 1;
579 
580 	ec->rx_coalesce_usecs = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
581 	ec->tx_coalesce_usecs = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
582 
583 	return 0;
584 }
585 
586 /**
587  * iavf_get_coalesce - Get interrupt coalescing settings
588  * @netdev: network interface device structure
589  * @ec: ethtool coalesce structure
590  * @kernel_coal: ethtool CQE mode setting structure
591  * @extack: extack for reporting error messages
592  *
593  * Returns current coalescing settings. This is referred to elsewhere in the
594  * driver as Interrupt Throttle Rate, as this is how the hardware describes
595  * this functionality. Note that if per-queue settings have been modified this
596  * only represents the settings of queue 0.
597  **/
598 static int iavf_get_coalesce(struct net_device *netdev,
599 			     struct ethtool_coalesce *ec,
600 			     struct kernel_ethtool_coalesce *kernel_coal,
601 			     struct netlink_ext_ack *extack)
602 {
603 	return __iavf_get_coalesce(netdev, ec, -1);
604 }
605 
606 /**
607  * iavf_get_per_queue_coalesce - get coalesce values for specific queue
608  * @netdev: netdev to read
609  * @ec: coalesce settings from ethtool
610  * @queue: the queue to read
611  *
612  * Read specific queue's coalesce settings.
613  **/
614 static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
615 				       struct ethtool_coalesce *ec)
616 {
617 	return __iavf_get_coalesce(netdev, ec, queue);
618 }
619 
620 /**
621  * iavf_set_itr_per_queue - set ITR values for specific queue
622  * @adapter: the VF adapter struct to set values for
623  * @ec: coalesce settings from ethtool
624  * @queue: the queue to modify
625  *
626  * Change the ITR settings for a specific queue.
627  **/
628 static int iavf_set_itr_per_queue(struct iavf_adapter *adapter,
629 				  struct ethtool_coalesce *ec, int queue)
630 {
631 	struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
632 	struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
633 	struct iavf_q_vector *q_vector;
634 	u16 itr_setting;
635 
636 	itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
637 
638 	if (ec->rx_coalesce_usecs != itr_setting &&
639 	    ec->use_adaptive_rx_coalesce) {
640 		netif_info(adapter, drv, adapter->netdev,
641 			   "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
642 		return -EINVAL;
643 	}
644 
645 	itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
646 
647 	if (ec->tx_coalesce_usecs != itr_setting &&
648 	    ec->use_adaptive_tx_coalesce) {
649 		netif_info(adapter, drv, adapter->netdev,
650 			   "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
651 		return -EINVAL;
652 	}
653 
654 	rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
655 	tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
656 
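	/* Set the dynamic (adaptive) ITR flag unconditionally, then clear it
	 * again via XOR when adaptive coalescing was not requested.
	 */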
657 	rx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
658 	if (!ec->use_adaptive_rx_coalesce)
659 		rx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;
660 
661 	tx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
662 	if (!ec->use_adaptive_tx_coalesce)
663 		tx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;
664 
665 	q_vector = rx_ring->q_vector;
666 	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
667 
668 	q_vector = tx_ring->q_vector;
669 	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
670 
671 	/* The interrupt handler itself will take care of programming
672 	 * the Tx and Rx ITR values based on the values we have entered
673 	 * into the q_vector, no need to write the values now.
674 	 */
675 	return 0;
676 }
677 
678 /**
679  * __iavf_set_coalesce - set coalesce settings for particular queue
680  * @netdev: the netdev to change
681  * @ec: ethtool coalesce settings
682  * @queue: the queue to change
683  *
684  * Sets the coalesce settings for a particular queue.
685  **/
686 static int __iavf_set_coalesce(struct net_device *netdev,
687 			       struct ethtool_coalesce *ec, int queue)
688 {
689 	struct iavf_adapter *adapter = netdev_priv(netdev);
690 	int i;
691 
692 	if (ec->rx_coalesce_usecs > IAVF_MAX_ITR) {
693 		netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
694 		return -EINVAL;
695 	} else if (ec->tx_coalesce_usecs > IAVF_MAX_ITR) {
696 		netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
697 		return -EINVAL;
698 	}
699 
700 	/* Rx and Tx usecs are per-queue values. If the user doesn't specify
701 	 * a queue, apply the settings to all queues.
702 	 */
703 	if (queue < 0) {
704 		for (i = 0; i < adapter->num_active_queues; i++)
705 			if (iavf_set_itr_per_queue(adapter, ec, i))
706 				return -EINVAL;
707 	} else if (queue < adapter->num_active_queues) {
708 		if (iavf_set_itr_per_queue(adapter, ec, queue))
709 			return -EINVAL;
710 	} else {
711 		netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
712 			   adapter->num_active_queues - 1);
713 		return -EINVAL;
714 	}
715 
716 	return 0;
717 }
718 
719 /**
720  * iavf_set_coalesce - Set interrupt coalescing settings
721  * @netdev: network interface device structure
722  * @ec: ethtool coalesce structure
723  * @kernel_coal: ethtool CQE mode setting structure
724  * @extack: extack for reporting error messages
725  *
726  * Change current coalescing settings for every queue.
727  **/
728 static int iavf_set_coalesce(struct net_device *netdev,
729 			     struct ethtool_coalesce *ec,
730 			     struct kernel_ethtool_coalesce *kernel_coal,
731 			     struct netlink_ext_ack *extack)
732 {
733 	return __iavf_set_coalesce(netdev, ec, -1);
734 }
735 
736 /**
737  * iavf_set_per_queue_coalesce - set specific queue's coalesce settings
738  * @netdev: the netdev to change
739  * @ec: ethtool's coalesce settings
740  * @queue: the queue to modify
741  *
742  * Modifies a specific queue's coalesce settings.
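 *
 * Reached, for example, via:
 *   ethtool --per-queue <ethX> queue_mask 0x1 --coalesce rx-usecs 50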
743  */
744 static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
745 				       struct ethtool_coalesce *ec)
746 {
747 	return __iavf_set_coalesce(netdev, ec, queue);
748 }
749 
750 /**
751  * iavf_fltr_to_ethtool_flow - convert filter type values to ethtool
752  * flow type values
753  * @flow: filter type to be converted
754  *
755  * Returns the corresponding ethtool flow type.
756  */
757 static int iavf_fltr_to_ethtool_flow(enum iavf_fdir_flow_type flow)
758 {
759 	switch (flow) {
760 	case IAVF_FDIR_FLOW_IPV4_TCP:
761 		return TCP_V4_FLOW;
762 	case IAVF_FDIR_FLOW_IPV4_UDP:
763 		return UDP_V4_FLOW;
764 	case IAVF_FDIR_FLOW_IPV4_SCTP:
765 		return SCTP_V4_FLOW;
766 	case IAVF_FDIR_FLOW_IPV4_AH:
767 		return AH_V4_FLOW;
768 	case IAVF_FDIR_FLOW_IPV4_ESP:
769 		return ESP_V4_FLOW;
770 	case IAVF_FDIR_FLOW_IPV4_OTHER:
771 		return IPV4_USER_FLOW;
772 	case IAVF_FDIR_FLOW_IPV6_TCP:
773 		return TCP_V6_FLOW;
774 	case IAVF_FDIR_FLOW_IPV6_UDP:
775 		return UDP_V6_FLOW;
776 	case IAVF_FDIR_FLOW_IPV6_SCTP:
777 		return SCTP_V6_FLOW;
778 	case IAVF_FDIR_FLOW_IPV6_AH:
779 		return AH_V6_FLOW;
780 	case IAVF_FDIR_FLOW_IPV6_ESP:
781 		return ESP_V6_FLOW;
782 	case IAVF_FDIR_FLOW_IPV6_OTHER:
783 		return IPV6_USER_FLOW;
784 	case IAVF_FDIR_FLOW_NON_IP_L2:
785 		return ETHER_FLOW;
786 	default:
787 		/* 0 is undefined ethtool flow */
788 		return 0;
789 	}
790 }
791 
792 /**
793  * iavf_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
794  * @eth: Ethtool flow type to be converted
795  *
796  * Returns flow enum
797  */
798 static enum iavf_fdir_flow_type iavf_ethtool_flow_to_fltr(int eth)
799 {
800 	switch (eth) {
801 	case TCP_V4_FLOW:
802 		return IAVF_FDIR_FLOW_IPV4_TCP;
803 	case UDP_V4_FLOW:
804 		return IAVF_FDIR_FLOW_IPV4_UDP;
805 	case SCTP_V4_FLOW:
806 		return IAVF_FDIR_FLOW_IPV4_SCTP;
807 	case AH_V4_FLOW:
808 		return IAVF_FDIR_FLOW_IPV4_AH;
809 	case ESP_V4_FLOW:
810 		return IAVF_FDIR_FLOW_IPV4_ESP;
811 	case IPV4_USER_FLOW:
812 		return IAVF_FDIR_FLOW_IPV4_OTHER;
813 	case TCP_V6_FLOW:
814 		return IAVF_FDIR_FLOW_IPV6_TCP;
815 	case UDP_V6_FLOW:
816 		return IAVF_FDIR_FLOW_IPV6_UDP;
817 	case SCTP_V6_FLOW:
818 		return IAVF_FDIR_FLOW_IPV6_SCTP;
819 	case AH_V6_FLOW:
820 		return IAVF_FDIR_FLOW_IPV6_AH;
821 	case ESP_V6_FLOW:
822 		return IAVF_FDIR_FLOW_IPV6_ESP;
823 	case IPV6_USER_FLOW:
824 		return IAVF_FDIR_FLOW_IPV6_OTHER;
825 	case ETHER_FLOW:
826 		return IAVF_FDIR_FLOW_NON_IP_L2;
827 	default:
828 		return IAVF_FDIR_FLOW_NONE;
829 	}
830 }
831 
832 /**
833  * iavf_is_mask_valid - check mask field set
834  * @mask: full mask to check
835  * @field: field for which mask should be valid
836  *
837  * Returns true if every bit of @field is also set in @mask (i.e. the mask
838  * fully covers the field), false otherwise.
839  */
840 static bool iavf_is_mask_valid(u64 mask, u64 field)
841 {
842 	return (mask & field) == field;
843 }
844 
845 /**
846  * iavf_parse_rx_flow_user_data - deconstruct user-defined data
847  * @fsp: pointer to ethtool Rx flow specification
848  * @fltr: pointer to Flow Director filter for userdef data storage
849  *
850  * Returns 0 on success, negative error value on failure
851  */
852 static int
853 iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
854 			     struct iavf_fdir_fltr *fltr)
855 {
856 	struct iavf_flex_word *flex;
857 	int i, cnt = 0;
858 
859 	if (!(fsp->flow_type & FLOW_EXT))
860 		return 0;
861 
862 	for (i = 0; i < IAVF_FLEX_WORD_NUM; i++) {
863 #define IAVF_USERDEF_FLEX_WORD_M	GENMASK(15, 0)
864 #define IAVF_USERDEF_FLEX_OFFS_S	16
865 #define IAVF_USERDEF_FLEX_OFFS_M	GENMASK(31, IAVF_USERDEF_FLEX_OFFS_S)
866 #define IAVF_USERDEF_FLEX_FLTR_M	GENMASK(31, 0)
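		/* Each 32-bit user-def word carries a 16-bit flex pattern in
		 * bits 15:0 and its byte offset in bits 31:16.
		 */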
867 		u32 value = be32_to_cpu(fsp->h_ext.data[i]);
868 		u32 mask = be32_to_cpu(fsp->m_ext.data[i]);
869 
870 		if (!value || !mask)
871 			continue;
872 
873 		if (!iavf_is_mask_valid(mask, IAVF_USERDEF_FLEX_FLTR_M))
874 			return -EINVAL;
875 
876 		/* 504 is the maximum value for offsets, and offset is measured
877 		 * from the start of the MAC address.
878 		 */
879 #define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504
880 		flex = &fltr->flex_words[cnt++];
881 		flex->word = value & IAVF_USERDEF_FLEX_WORD_M;
882 		flex->offset = FIELD_GET(IAVF_USERDEF_FLEX_OFFS_M, value);
883 		if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL)
884 			return -EINVAL;
885 	}
886 
887 	fltr->flex_cnt = cnt;
888 
889 	return 0;
890 }
891 
892 /**
893  * iavf_fill_rx_flow_ext_data - fill the additional data
894  * @fsp: pointer to ethtool Rx flow specification
895  * @fltr: pointer to Flow Director filter to get additional data
896  */
897 static void
898 iavf_fill_rx_flow_ext_data(struct ethtool_rx_flow_spec *fsp,
899 			   struct iavf_fdir_fltr *fltr)
900 {
901 	if (!fltr->ext_mask.usr_def[0] && !fltr->ext_mask.usr_def[1])
902 		return;
903 
904 	fsp->flow_type |= FLOW_EXT;
905 
906 	memcpy(fsp->h_ext.data, fltr->ext_data.usr_def, sizeof(fsp->h_ext.data));
907 	memcpy(fsp->m_ext.data, fltr->ext_mask.usr_def, sizeof(fsp->m_ext.data));
908 }
909 
910 /**
911  * iavf_get_ethtool_fdir_entry - fill ethtool structure with Flow Director filter data
912  * @adapter: the VF adapter structure that contains filter list
913  * @cmd: ethtool command data structure to receive the filter data
914  *
915  * Returns 0 as expected for success by ethtool
916  */
917 static int
918 iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
919 			    struct ethtool_rxnfc *cmd)
920 {
921 	struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
922 	struct iavf_fdir_fltr *rule = NULL;
923 	int ret = 0;
924 
925 	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
926 		return -EOPNOTSUPP;
927 
928 	spin_lock_bh(&adapter->fdir_fltr_lock);
929 
930 	rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
931 	if (!rule) {
932 		ret = -EINVAL;
933 		goto release_lock;
934 	}
935 
936 	fsp->flow_type = iavf_fltr_to_ethtool_flow(rule->flow_type);
937 
938 	memset(&fsp->m_u, 0, sizeof(fsp->m_u));
939 	memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
940 
941 	switch (fsp->flow_type) {
942 	case TCP_V4_FLOW:
943 	case UDP_V4_FLOW:
944 	case SCTP_V4_FLOW:
945 		fsp->h_u.tcp_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
946 		fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
947 		fsp->h_u.tcp_ip4_spec.psrc = rule->ip_data.src_port;
948 		fsp->h_u.tcp_ip4_spec.pdst = rule->ip_data.dst_port;
949 		fsp->h_u.tcp_ip4_spec.tos = rule->ip_data.tos;
950 		fsp->m_u.tcp_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
951 		fsp->m_u.tcp_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
952 		fsp->m_u.tcp_ip4_spec.psrc = rule->ip_mask.src_port;
953 		fsp->m_u.tcp_ip4_spec.pdst = rule->ip_mask.dst_port;
954 		fsp->m_u.tcp_ip4_spec.tos = rule->ip_mask.tos;
955 		break;
956 	case AH_V4_FLOW:
957 	case ESP_V4_FLOW:
958 		fsp->h_u.ah_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
959 		fsp->h_u.ah_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
960 		fsp->h_u.ah_ip4_spec.spi = rule->ip_data.spi;
961 		fsp->h_u.ah_ip4_spec.tos = rule->ip_data.tos;
962 		fsp->m_u.ah_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
963 		fsp->m_u.ah_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
964 		fsp->m_u.ah_ip4_spec.spi = rule->ip_mask.spi;
965 		fsp->m_u.ah_ip4_spec.tos = rule->ip_mask.tos;
966 		break;
967 	case IPV4_USER_FLOW:
968 		fsp->h_u.usr_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
969 		fsp->h_u.usr_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
970 		fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip_data.l4_header;
971 		fsp->h_u.usr_ip4_spec.tos = rule->ip_data.tos;
972 		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
973 		fsp->h_u.usr_ip4_spec.proto = rule->ip_data.proto;
974 		fsp->m_u.usr_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
975 		fsp->m_u.usr_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
976 		fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->ip_mask.l4_header;
977 		fsp->m_u.usr_ip4_spec.tos = rule->ip_mask.tos;
978 		fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
979 		fsp->m_u.usr_ip4_spec.proto = rule->ip_mask.proto;
980 		break;
981 	case TCP_V6_FLOW:
982 	case UDP_V6_FLOW:
983 	case SCTP_V6_FLOW:
984 		memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
985 		       sizeof(struct in6_addr));
986 		memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
987 		       sizeof(struct in6_addr));
988 		fsp->h_u.tcp_ip6_spec.psrc = rule->ip_data.src_port;
989 		fsp->h_u.tcp_ip6_spec.pdst = rule->ip_data.dst_port;
990 		fsp->h_u.tcp_ip6_spec.tclass = rule->ip_data.tclass;
991 		memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
992 		       sizeof(struct in6_addr));
993 		memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
994 		       sizeof(struct in6_addr));
995 		fsp->m_u.tcp_ip6_spec.psrc = rule->ip_mask.src_port;
996 		fsp->m_u.tcp_ip6_spec.pdst = rule->ip_mask.dst_port;
997 		fsp->m_u.tcp_ip6_spec.tclass = rule->ip_mask.tclass;
998 		break;
999 	case AH_V6_FLOW:
1000 	case ESP_V6_FLOW:
1001 		memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
1002 		       sizeof(struct in6_addr));
1003 		memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
1004 		       sizeof(struct in6_addr));
1005 		fsp->h_u.ah_ip6_spec.spi = rule->ip_data.spi;
1006 		fsp->h_u.ah_ip6_spec.tclass = rule->ip_data.tclass;
1007 		memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
1008 		       sizeof(struct in6_addr));
1009 		memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
1010 		       sizeof(struct in6_addr));
1011 		fsp->m_u.ah_ip6_spec.spi = rule->ip_mask.spi;
1012 		fsp->m_u.ah_ip6_spec.tclass = rule->ip_mask.tclass;
1013 		break;
1014 	case IPV6_USER_FLOW:
1015 		memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
1016 		       sizeof(struct in6_addr));
1017 		memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
1018 		       sizeof(struct in6_addr));
1019 		fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip_data.l4_header;
1020 		fsp->h_u.usr_ip6_spec.tclass = rule->ip_data.tclass;
1021 		fsp->h_u.usr_ip6_spec.l4_proto = rule->ip_data.proto;
1022 		memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
1023 		       sizeof(struct in6_addr));
1024 		memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
1025 		       sizeof(struct in6_addr));
1026 		fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->ip_mask.l4_header;
1027 		fsp->m_u.usr_ip6_spec.tclass = rule->ip_mask.tclass;
1028 		fsp->m_u.usr_ip6_spec.l4_proto = rule->ip_mask.proto;
1029 		break;
1030 	case ETHER_FLOW:
1031 		fsp->h_u.ether_spec.h_proto = rule->eth_data.etype;
1032 		fsp->m_u.ether_spec.h_proto = rule->eth_mask.etype;
1033 		break;
1034 	default:
1035 		ret = -EINVAL;
1036 		break;
1037 	}
1038 
1039 	iavf_fill_rx_flow_ext_data(fsp, rule);
1040 
1041 	if (rule->action == VIRTCHNL_ACTION_DROP)
1042 		fsp->ring_cookie = RX_CLS_FLOW_DISC;
1043 	else
1044 		fsp->ring_cookie = rule->q_index;
1045 
1046 release_lock:
1047 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1048 	return ret;
1049 }
1050 
1051 /**
1052  * iavf_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
1053  * @adapter: the VF adapter structure containing the filter list
1054  * @cmd: ethtool command data structure
1055  * @rule_locs: ethtool array passed in from OS to receive filter IDs
1056  *
1057  * Returns 0 as expected for success by ethtool
1058  */
1059 static int
1060 iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
1061 		       u32 *rule_locs)
1062 {
1063 	struct iavf_fdir_fltr *fltr;
1064 	unsigned int cnt = 0;
1065 	int val = 0;
1066 
1067 	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
1068 		return -EOPNOTSUPP;
1069 
1070 	cmd->data = IAVF_MAX_FDIR_FILTERS;
1071 
1072 	spin_lock_bh(&adapter->fdir_fltr_lock);
1073 
1074 	list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
1075 		if (cnt == cmd->rule_cnt) {
1076 			val = -EMSGSIZE;
1077 			goto release_lock;
1078 		}
1079 		rule_locs[cnt] = fltr->loc;
1080 		cnt++;
1081 	}
1082 
1083 release_lock:
1084 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1085 	if (!val)
1086 		cmd->rule_cnt = cnt;
1087 
1088 	return val;
1089 }
1090 
1091 /**
1092  * iavf_add_fdir_fltr_info - Set the input set for Flow Director filter
1093  * @adapter: pointer to the VF adapter structure
1094  * @fsp: pointer to ethtool Rx flow specification
1095  * @fltr: filter structure
1096  */
1097 static int
1098 iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spec *fsp,
1099 			struct iavf_fdir_fltr *fltr)
1100 {
1101 	u32 flow_type, q_index = 0;
1102 	enum virtchnl_action act;
1103 	int err;
1104 
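	/* A ring_cookie of RX_CLS_FLOW_DISC means drop; any other value
	 * selects the destination Rx queue.
	 */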
1105 	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
1106 		act = VIRTCHNL_ACTION_DROP;
1107 	} else {
1108 		q_index = fsp->ring_cookie;
1109 		if (q_index >= adapter->num_active_queues)
1110 			return -EINVAL;
1111 
1112 		act = VIRTCHNL_ACTION_QUEUE;
1113 	}
1114 
1115 	fltr->action = act;
1116 	fltr->loc = fsp->location;
1117 	fltr->q_index = q_index;
1118 
1119 	if (fsp->flow_type & FLOW_EXT) {
1120 		memcpy(fltr->ext_data.usr_def, fsp->h_ext.data,
1121 		       sizeof(fltr->ext_data.usr_def));
1122 		memcpy(fltr->ext_mask.usr_def, fsp->m_ext.data,
1123 		       sizeof(fltr->ext_mask.usr_def));
1124 	}
1125 
1126 	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
1127 	fltr->flow_type = iavf_ethtool_flow_to_fltr(flow_type);
1128 
1129 	switch (flow_type) {
1130 	case TCP_V4_FLOW:
1131 	case UDP_V4_FLOW:
1132 	case SCTP_V4_FLOW:
1133 		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
1134 		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
1135 		fltr->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc;
1136 		fltr->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
1137 		fltr->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos;
1138 		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
1139 		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
1140 		fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
1141 		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
1142 		fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
1143 		fltr->ip_ver = 4;
1144 		break;
1145 	case AH_V4_FLOW:
1146 	case ESP_V4_FLOW:
1147 		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.ah_ip4_spec.ip4src;
1148 		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.ah_ip4_spec.ip4dst;
1149 		fltr->ip_data.spi = fsp->h_u.ah_ip4_spec.spi;
1150 		fltr->ip_data.tos = fsp->h_u.ah_ip4_spec.tos;
1151 		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.ah_ip4_spec.ip4src;
1152 		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
1153 		fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
1154 		fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
1155 		fltr->ip_ver = 4;
1156 		break;
1157 	case IPV4_USER_FLOW:
1158 		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
1159 		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
1160 		fltr->ip_data.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
1161 		fltr->ip_data.tos = fsp->h_u.usr_ip4_spec.tos;
1162 		fltr->ip_data.proto = fsp->h_u.usr_ip4_spec.proto;
1163 		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
1164 		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
1165 		fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
1166 		fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
1167 		fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
1168 		fltr->ip_ver = 4;
1169 		break;
1170 	case TCP_V6_FLOW:
1171 	case UDP_V6_FLOW:
1172 	case SCTP_V6_FLOW:
1173 		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1174 		       sizeof(struct in6_addr));
1175 		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1176 		       sizeof(struct in6_addr));
1177 		fltr->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc;
1178 		fltr->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
1179 		fltr->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass;
1180 		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
1181 		       sizeof(struct in6_addr));
1182 		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
1183 		       sizeof(struct in6_addr));
1184 		fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
1185 		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
1186 		fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
1187 		fltr->ip_ver = 6;
1188 		break;
1189 	case AH_V6_FLOW:
1190 	case ESP_V6_FLOW:
1191 		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.ah_ip6_spec.ip6src,
1192 		       sizeof(struct in6_addr));
1193 		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.ah_ip6_spec.ip6dst,
1194 		       sizeof(struct in6_addr));
1195 		fltr->ip_data.spi = fsp->h_u.ah_ip6_spec.spi;
1196 		fltr->ip_data.tclass = fsp->h_u.ah_ip6_spec.tclass;
1197 		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.ah_ip6_spec.ip6src,
1198 		       sizeof(struct in6_addr));
1199 		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.ah_ip6_spec.ip6dst,
1200 		       sizeof(struct in6_addr));
1201 		fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
1202 		fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
1203 		fltr->ip_ver = 6;
1204 		break;
1205 	case IPV6_USER_FLOW:
1206 		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1207 		       sizeof(struct in6_addr));
1208 		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1209 		       sizeof(struct in6_addr));
1210 		fltr->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
1211 		fltr->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass;
1212 		fltr->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto;
1213 		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
1214 		       sizeof(struct in6_addr));
1215 		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
1216 		       sizeof(struct in6_addr));
1217 		fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
1218 		fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
1219 		fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
1220 		fltr->ip_ver = 6;
1221 		break;
1222 	case ETHER_FLOW:
1223 		fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
1224 		fltr->eth_mask.etype = fsp->m_u.ether_spec.h_proto;
1225 		break;
1226 	default:
1227 		/* not doing un-parsed flow types */
1228 		return -EINVAL;
1229 	}
1230 
1231 	err = iavf_validate_fdir_fltr_masks(adapter, fltr);
1232 	if (err)
1233 		return err;
1234 
1235 	if (iavf_fdir_is_dup_fltr(adapter, fltr))
1236 		return -EEXIST;
1237 
1238 	err = iavf_parse_rx_flow_user_data(fsp, fltr);
1239 	if (err)
1240 		return err;
1241 
1242 	return iavf_fill_fdir_add_msg(adapter, fltr);
1243 }
1244 
1245 /**
1246  * iavf_add_fdir_ethtool - add Flow Director filter
1247  * @adapter: pointer to the VF adapter structure
1248  * @cmd: command to add Flow Director filter
1249  *
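 * A rule typically arrives here from user space via ETHTOOL_SRXCLSRLINS,
 * e.g. (interface name and values are only examples):
 *   ethtool -N <ethX> flow-type tcp4 src-ip 192.168.0.10 dst-port 80 action 2
 *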
1250  * Returns 0 on success and negative values for failure
1251  */
1252 static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
1253 {
1254 	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
1255 	struct iavf_fdir_fltr *fltr;
1256 	int count = 50;
1257 	int err;
1258 
1259 	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
1260 		return -EOPNOTSUPP;
1261 
1262 	if (fsp->flow_type & FLOW_MAC_EXT)
1263 		return -EINVAL;
1264 
1265 	spin_lock_bh(&adapter->fdir_fltr_lock);
1266 	if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) {
1267 		spin_unlock_bh(&adapter->fdir_fltr_lock);
1268 		dev_err(&adapter->pdev->dev,
1269 			"Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n",
1270 			IAVF_MAX_FDIR_FILTERS);
1271 		return -ENOSPC;
1272 	}
1273 
1274 	if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) {
1275 		dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
1276 		spin_unlock_bh(&adapter->fdir_fltr_lock);
1277 		return -EEXIST;
1278 	}
1279 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1280 
1281 	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
1282 	if (!fltr)
1283 		return -ENOMEM;
1284 
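	/* Busy-wait (bounded to roughly 50 us) for the critical lock instead
	 * of sleeping; give up if it cannot be acquired.
	 */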
1285 	while (!mutex_trylock(&adapter->crit_lock)) {
1286 		if (--count == 0) {
1287 			kfree(fltr);
1288 			return -EINVAL;
1289 		}
1290 		udelay(1);
1291 	}
1292 
1293 	err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
1294 	if (err)
1295 		goto ret;
1296 
1297 	spin_lock_bh(&adapter->fdir_fltr_lock);
1298 	iavf_fdir_list_add_fltr(adapter, fltr);
1299 	adapter->fdir_active_fltr++;
1300 
1301 	if (adapter->link_up)
1302 		fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
1303 	else
1304 		fltr->state = IAVF_FDIR_FLTR_INACTIVE;
1305 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1306 
1307 	if (adapter->link_up)
1308 		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER);
1309 ret:
1310 	if (err && fltr)
1311 		kfree(fltr);
1312 
1313 	mutex_unlock(&adapter->crit_lock);
1314 	return err;
1315 }
1316 
1317 /**
1318  * iavf_del_fdir_ethtool - delete Flow Director filter
1319  * @adapter: pointer to the VF adapter structure
1320  * @cmd: command to delete Flow Director filter
1321  *
1322  * Returns 0 on success and negative values for failure
1323  */
1324 static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
1325 {
1326 	struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
1327 	struct iavf_fdir_fltr *fltr = NULL;
1328 	int err = 0;
1329 
1330 	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
1331 		return -EOPNOTSUPP;
1332 
1333 	spin_lock_bh(&adapter->fdir_fltr_lock);
1334 	fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
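	/* An active filter must be removed through the PF, so only mark it
	 * for deletion here; an inactive filter was never offloaded and can
	 * be freed immediately.
	 */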
1335 	if (fltr) {
1336 		if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
1337 			fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
1338 		} else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
1339 			list_del(&fltr->list);
1340 			kfree(fltr);
1341 			adapter->fdir_active_fltr--;
1342 			fltr = NULL;
1343 		} else {
1344 			err = -EBUSY;
1345 		}
1346 	} else if (adapter->fdir_active_fltr) {
1347 		err = -EINVAL;
1348 	}
1349 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1350 
1351 	if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
1352 		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER);
1353 
1354 	return err;
1355 }
1356 
1357 /**
1358  * iavf_adv_rss_parse_hdrs - parses headers from RSS hash input
1359  * @cmd: ethtool rxnfc command
1360  *
1361  * This function parses the rxnfc command and returns intended
1362  * header types for RSS configuration
1363  */
1364 static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
1365 {
1366 	u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE;
1367 
1368 	switch (cmd->flow_type) {
1369 	case TCP_V4_FLOW:
1370 		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
1371 			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
1372 		break;
1373 	case UDP_V4_FLOW:
1374 		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
1375 			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
1376 		break;
1377 	case SCTP_V4_FLOW:
1378 		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
1379 			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
1380 		break;
1381 	case TCP_V6_FLOW:
1382 		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
1383 			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
1384 		break;
1385 	case UDP_V6_FLOW:
1386 		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
1387 			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
1388 		break;
1389 	case SCTP_V6_FLOW:
1390 		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
1391 			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
1392 		break;
1393 	default:
1394 		break;
1395 	}
1396 
1397 	return hdrs;
1398 }
1399 
1400 /**
1401  * iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input
1402  * @cmd: ethtool rxnfc command
1403  * @symm: true if Symmetric Toeplitz is set
1404  *
1405  * This function parses the rxnfc command and returns intended hash fields for
1406  * RSS configuration
1407  */
1408 static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
1409 {
1410 	u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
1411 
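	/* Translate the ethtool RXH_* bits into the VF hash-field bitmap,
	 * constrained by the requested flow type.
	 */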
1412 	if (cmd->data & RXH_IP_SRC || cmd->data & RXH_IP_DST) {
1413 		switch (cmd->flow_type) {
1414 		case TCP_V4_FLOW:
1415 		case UDP_V4_FLOW:
1416 		case SCTP_V4_FLOW:
1417 			if (cmd->data & RXH_IP_SRC)
1418 				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA;
1419 			if (cmd->data & RXH_IP_DST)
1420 				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_DA;
1421 			break;
1422 		case TCP_V6_FLOW:
1423 		case UDP_V6_FLOW:
1424 		case SCTP_V6_FLOW:
1425 			if (cmd->data & RXH_IP_SRC)
1426 				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA;
1427 			if (cmd->data & RXH_IP_DST)
1428 				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_DA;
1429 			break;
1430 		default:
1431 			break;
1432 		}
1433 	}
1434 
1435 	if (cmd->data & RXH_L4_B_0_1 || cmd->data & RXH_L4_B_2_3) {
1436 		switch (cmd->flow_type) {
1437 		case TCP_V4_FLOW:
1438 		case TCP_V6_FLOW:
1439 			if (cmd->data & RXH_L4_B_0_1)
1440 				hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT;
1441 			if (cmd->data & RXH_L4_B_2_3)
1442 				hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT;
1443 			break;
1444 		case UDP_V4_FLOW:
1445 		case UDP_V6_FLOW:
1446 			if (cmd->data & RXH_L4_B_0_1)
1447 				hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT;
1448 			if (cmd->data & RXH_L4_B_2_3)
1449 				hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT;
1450 			break;
1451 		case SCTP_V4_FLOW:
1452 		case SCTP_V6_FLOW:
1453 			if (cmd->data & RXH_L4_B_0_1)
1454 				hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT;
1455 			if (cmd->data & RXH_L4_B_2_3)
1456 				hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT;
1457 			break;
1458 		default:
1459 			break;
1460 		}
1461 	}
1462 
1463 	return hfld;
1464 }
1465 
1466 /**
1467  * iavf_set_adv_rss_hash_opt - Enable/Disable flow types for RSS hash
1468  * @adapter: pointer to the VF adapter structure
1469  * @cmd: ethtool rxnfc command
1470  *
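 * Typically reached via a request such as (interface name is an example):
 *   ethtool -N <ethX> rx-flow-hash tcp4 sdfn
 *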
1471  * Returns Success if the flow input set is supported.
1472  */
1473 static int
1474 iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
1475 			  struct ethtool_rxnfc *cmd)
1476 {
1477 	struct iavf_adv_rss *rss_old, *rss_new;
1478 	bool rss_new_add = false;
1479 	int count = 50, err = 0;
1480 	bool symm = false;
1481 	u64 hash_flds;
1482 	u32 hdrs;
1483 
1484 	if (!ADV_RSS_SUPPORT(adapter))
1485 		return -EOPNOTSUPP;
1486 
1487 	symm = !!(adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC);
1488 
1489 	hdrs = iavf_adv_rss_parse_hdrs(cmd);
1490 	if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
1491 		return -EINVAL;
1492 
1493 	hash_flds = iavf_adv_rss_parse_hash_flds(cmd, symm);
1494 	if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
1495 		return -EINVAL;
1496 
1497 	rss_new = kzalloc(sizeof(*rss_new), GFP_KERNEL);
1498 	if (!rss_new)
1499 		return -ENOMEM;
1500 
1501 	if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds,
1502 				      symm)) {
1503 		kfree(rss_new);
1504 		return -EINVAL;
1505 	}
1506 
1507 	while (!mutex_trylock(&adapter->crit_lock)) {
1508 		if (--count == 0) {
1509 			kfree(rss_new);
1510 			return -EINVAL;
1511 		}
1512 
1513 		udelay(1);
1514 	}
1515 
1516 	spin_lock_bh(&adapter->adv_rss_lock);
1517 	rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
1518 	if (rss_old) {
1519 		if (rss_old->state != IAVF_ADV_RSS_ACTIVE) {
1520 			err = -EBUSY;
1521 		} else if (rss_old->hash_flds != hash_flds ||
1522 			   rss_old->symm != symm) {
1523 			rss_old->state = IAVF_ADV_RSS_ADD_REQUEST;
1524 			rss_old->hash_flds = hash_flds;
1525 			rss_old->symm = symm;
1526 			memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg,
1527 			       sizeof(rss_new->cfg_msg));
1528 		} else {
1529 			err = -EEXIST;
1530 		}
1531 	} else {
1532 		rss_new_add = true;
1533 		rss_new->state = IAVF_ADV_RSS_ADD_REQUEST;
1534 		rss_new->packet_hdrs = hdrs;
1535 		rss_new->hash_flds = hash_flds;
1536 		rss_new->symm = symm;
1537 		list_add_tail(&rss_new->list, &adapter->adv_rss_list_head);
1538 	}
1539 	spin_unlock_bh(&adapter->adv_rss_lock);
1540 
1541 	if (!err)
1542 		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
1543 
1544 	mutex_unlock(&adapter->crit_lock);
1545 
1546 	if (!rss_new_add)
1547 		kfree(rss_new);
1548 
1549 	return err;
1550 }
1551 
1552 /**
1553  * iavf_get_adv_rss_hash_opt - Retrieve hash fields for a given flow-type
1554  * @adapter: pointer to the VF adapter structure
1555  * @cmd: ethtool rxnfc command
1556  *
1557  * Returns 0 if the flow input set is supported, negative error code otherwise.
1558  */
1559 static int
1560 iavf_get_adv_rss_hash_opt(struct iavf_adapter *adapter,
1561 			  struct ethtool_rxnfc *cmd)
1562 {
1563 	struct iavf_adv_rss *rss;
1564 	u64 hash_flds;
1565 	u32 hdrs;
1566 
1567 	if (!ADV_RSS_SUPPORT(adapter))
1568 		return -EOPNOTSUPP;
1569 
1570 	cmd->data = 0;
1571 
1572 	hdrs = iavf_adv_rss_parse_hdrs(cmd);
1573 	if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
1574 		return -EINVAL;
1575 
1576 	spin_lock_bh(&adapter->adv_rss_lock);
1577 	rss = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
1578 	if (rss)
1579 		hash_flds = rss->hash_flds;
1580 	else
1581 		hash_flds = IAVF_ADV_RSS_HASH_INVALID;
1582 	spin_unlock_bh(&adapter->adv_rss_lock);
1583 
1584 	if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
1585 		return -EINVAL;
1586 
1587 	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA |
1588 			 IAVF_ADV_RSS_HASH_FLD_IPV6_SA))
1589 		cmd->data |= (u64)RXH_IP_SRC;
1590 
1591 	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA |
1592 			 IAVF_ADV_RSS_HASH_FLD_IPV6_DA))
1593 		cmd->data |= (u64)RXH_IP_DST;
1594 
1595 	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
1596 			 IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT |
1597 			 IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT))
1598 		cmd->data |= (u64)RXH_L4_B_0_1;
1599 
1600 	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT |
1601 			 IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT |
1602 			 IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT))
1603 		cmd->data |= (u64)RXH_L4_B_2_3;
1604 
1605 	return 0;
1606 }
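
/* Usage note (illustrative): the ETHTOOL_GRXFH handler above is queried with
 * e.g.
 *	ethtool -n <ifname> rx-flow-hash tcp4
 * and reports the configuration cached on adapter->adv_rss_list_head rather
 * than reading anything back from the PF.
 */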
1607 
1608 /**
1609  * iavf_set_rxnfc - command to set Rx flow rules and RSS hash fields
1610  * @netdev: network interface device structure
1611  * @cmd: ethtool rxnfc command
1612  *
1613  * Returns 0 on success and a negative error code on failure.
1614  */
1615 static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1616 {
1617 	struct iavf_adapter *adapter = netdev_priv(netdev);
1618 	int ret = -EOPNOTSUPP;
1619 
1620 	switch (cmd->cmd) {
1621 	case ETHTOOL_SRXCLSRLINS:
1622 		ret = iavf_add_fdir_ethtool(adapter, cmd);
1623 		break;
1624 	case ETHTOOL_SRXCLSRLDEL:
1625 		ret = iavf_del_fdir_ethtool(adapter, cmd);
1626 		break;
1627 	case ETHTOOL_SRXFH:
1628 		ret = iavf_set_adv_rss_hash_opt(adapter, cmd);
1629 		break;
1630 	default:
1631 		break;
1632 	}
1633 
1634 	return ret;
1635 }
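
/* Usage note (illustrative): the three ETHTOOL_SRX* commands handled above
 * correspond to ethtool invocations such as
 *	ethtool -N <ifname> flow-type tcp4 dst-port 80 action 4
 *	ethtool -N <ifname> delete 12
 *	ethtool -N <ifname> rx-flow-hash udp4 sdfn
 * which insert a Flow Director rule, delete the rule at location 12 and set
 * the RSS hash fields respectively.  <ifname>, the match values, the rule
 * location and the action queue are all placeholders.
 */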
1636 
1637 /**
1638  * iavf_get_rxnfc - command to get RX flow classification rules
1639  * @netdev: network interface device structure
1640  * @cmd: ethtool rxnfc command
1641  * @rule_locs: pointer to store rule locations
1642  *
1643  * Returns 0 if the command is supported, negative error code otherwise.
1644  **/
1645 static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1646 			  u32 *rule_locs)
1647 {
1648 	struct iavf_adapter *adapter = netdev_priv(netdev);
1649 	int ret = -EOPNOTSUPP;
1650 
1651 	switch (cmd->cmd) {
1652 	case ETHTOOL_GRXRINGS:
1653 		cmd->data = adapter->num_active_queues;
1654 		ret = 0;
1655 		break;
1656 	case ETHTOOL_GRXCLSRLCNT:
1657 		if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
1658 			break;
1659 		spin_lock_bh(&adapter->fdir_fltr_lock);
1660 		cmd->rule_cnt = adapter->fdir_active_fltr;
1661 		spin_unlock_bh(&adapter->fdir_fltr_lock);
1662 		cmd->data = IAVF_MAX_FDIR_FILTERS;
1663 		ret = 0;
1664 		break;
1665 	case ETHTOOL_GRXCLSRULE:
1666 		ret = iavf_get_ethtool_fdir_entry(adapter, cmd);
1667 		break;
1668 	case ETHTOOL_GRXCLSRLALL:
1669 		ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs);
1670 		break;
1671 	case ETHTOOL_GRXFH:
1672 		ret = iavf_get_adv_rss_hash_opt(adapter, cmd);
1673 		break;
1674 	default:
1675 		break;
1676 	}
1677 
1678 	return ret;
1679 }
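
/* Usage note (illustrative): "ethtool -u <ifname>" uses ETHTOOL_GRXCLSRLCNT
 * and ETHTOOL_GRXCLSRLALL to list the Flow Director rules counted above, and
 * "ethtool -u <ifname> rule <loc>" uses ETHTOOL_GRXCLSRULE for a single rule.
 * ETHTOOL_GRXRINGS reports how many receive queues a rule or the RSS table
 * may target.
 */
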
1680 /**
1681  * iavf_get_channels - get the number of channels supported by the device
1682  * @netdev: network interface device structure
1683  * @ch: channel information structure
1684  *
1685  * For the purposes of our device, we only use combined channels, i.e. a tx/rx
1686  * queue pair. Report one extra channel to match our "other" MSI-X vector.
1687  **/
1688 static void iavf_get_channels(struct net_device *netdev,
1689 			      struct ethtool_channels *ch)
1690 {
1691 	struct iavf_adapter *adapter = netdev_priv(netdev);
1692 
1693 	/* Report maximum channels */
1694 	ch->max_combined = adapter->vsi_res->num_queue_pairs;
1695 
1696 	ch->max_other = NONQ_VECS;
1697 	ch->other_count = NONQ_VECS;
1698 
1699 	ch->combined_count = adapter->num_active_queues;
1700 }
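
/* Usage note (illustrative): reported via
 *	ethtool -l <ifname>
 * For example, a VF given 16 queue pairs but running with 4 would show a
 * maximum of 16 combined channels, a current count of 4, and one "other"
 * channel for the driver's non-queue MSI-X vector.
 */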
1701 
1702 /**
1703  * iavf_set_channels - set the new channel count
1704  * @netdev: network interface device structure
1705  * @ch: channel information structure
1706  *
1707  * Negotiate a new number of channels with the PF then do a reset.  During
1708  * reset we'll realloc queues and fix the RSS table.  Returns 0 on success,
1709  * negative on failure.
1710  **/
1711 static int iavf_set_channels(struct net_device *netdev,
1712 			     struct ethtool_channels *ch)
1713 {
1714 	struct iavf_adapter *adapter = netdev_priv(netdev);
1715 	u32 num_req = ch->combined_count;
1716 	int ret = 0;
1717 
1718 	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1719 	    adapter->num_tc) {
1720 		dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n");
1721 		return -EINVAL;
1722 	}
1723 
1724 	/* All of these should have already been checked by ethtool before this
1725 	 * even gets to us, but just to be sure.
1726 	 */
1727 	if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs)
1728 		return -EINVAL;
1729 
1730 	if (num_req == adapter->num_active_queues)
1731 		return 0;
1732 
1733 	if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
1734 		return -EINVAL;
1735 
1736 	adapter->num_req_queues = num_req;
1737 	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1738 	iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
1739 
1740 	ret = iavf_wait_for_reset(adapter);
1741 	if (ret)
1742 		netdev_warn(netdev, "Timed out or interrupted waiting for reset after changing channel count\n");
1743 
1744 	return ret;
1745 }
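
/* Usage note (illustrative):
 *	ethtool -L <ifname> combined 8
 * requests 8 queue pairs.  Only the combined count can change: separate
 * rx/tx counts or a different "other" count are rejected above, and the new
 * value only takes effect once the scheduled VF reset completes.
 */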
1746 
1747 /**
1748  * iavf_get_rxfh_key_size - get the RSS hash key size
1749  * @netdev: network interface device structure
1750  *
1751  * Returns the size of the RSS hash key, in bytes.
1752  **/
1753 static u32 iavf_get_rxfh_key_size(struct net_device *netdev)
1754 {
1755 	struct iavf_adapter *adapter = netdev_priv(netdev);
1756 
1757 	return adapter->rss_key_size;
1758 }
1759 
1760 /**
1761  * iavf_get_rxfh_indir_size - get the rx flow hash indirection table size
1762  * @netdev: network interface device structure
1763  *
1764  * Returns the size of the RSS indirection table.
1765  **/
1766 static u32 iavf_get_rxfh_indir_size(struct net_device *netdev)
1767 {
1768 	struct iavf_adapter *adapter = netdev_priv(netdev);
1769 
1770 	return adapter->rss_lut_size;
1771 }
1772 
1773 /**
1774  * iavf_get_rxfh - get the rx flow hash indirection table
1775  * @netdev: network interface device structure
1776  * @rxfh: pointer to param struct (indir, key, hfunc)
1777  *
1778  * Copies the driver's cached RSS key, LUT and hash function to @rxfh. Always returns 0.
1779  **/
1780 static int iavf_get_rxfh(struct net_device *netdev,
1781 			 struct ethtool_rxfh_param *rxfh)
1782 {
1783 	struct iavf_adapter *adapter = netdev_priv(netdev);
1784 	u16 i;
1785 
1786 	rxfh->hfunc = ETH_RSS_HASH_TOP;
1787 	if (adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
1788 		rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
1789 
1790 	if (rxfh->key)
1791 		memcpy(rxfh->key, adapter->rss_key, adapter->rss_key_size);
1792 
1793 	if (rxfh->indir)
1794 		/* Each LUT entry is copied into one 32-bit 'indir' word */
1795 		for (i = 0; i < adapter->rss_lut_size; i++)
1796 			rxfh->indir[i] = (u32)adapter->rss_lut[i];
1797 
1798 	return 0;
1799 }
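
/* Usage note (illustrative):
 *	ethtool -x <ifname>
 * dumps the RSS key, indirection table and hash function returned here.  The
 * values come from the driver's cached copies (adapter->rss_key and
 * adapter->rss_lut), not from a device register read.
 */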
1800 
1801 /**
1802  * iavf_set_rxfh - set the rx flow hash indirection table
1803  * @netdev: network interface device structure
1804  * @rxfh: pointer to param struct (indir, key, hfunc)
1805  * @extack: extended ACK from the Netlink message
1806  *
1807  * Returns -EINVAL if the table specifies an invalid queue id, otherwise
1808  * returns 0 after programming the table.
1809  **/
1810 static int iavf_set_rxfh(struct net_device *netdev,
1811 			 struct ethtool_rxfh_param *rxfh,
1812 			 struct netlink_ext_ack *extack)
1813 {
1814 	struct iavf_adapter *adapter = netdev_priv(netdev);
1815 	u16 i;
1816 
1817 	/* Only support toeplitz hash function */
1818 	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
1819 	    rxfh->hfunc != ETH_RSS_HASH_TOP)
1820 		return -EOPNOTSUPP;
1821 
1822 	if ((rxfh->input_xfrm & RXH_XFRM_SYM_XOR) &&
1823 	    adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) {
1824 		if (!ADV_RSS_SUPPORT(adapter))
1825 			return -EOPNOTSUPP;
1826 		adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
1827 		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
1828 	} else if (!(rxfh->input_xfrm & RXH_XFRM_SYM_XOR) &&
1829 		    adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC) {
1830 		adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
1831 		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
1832 	}
1833 
1834 	if (!rxfh->key && !rxfh->indir)
1835 		return 0;
1836 
1837 	if (rxfh->key)
1838 		memcpy(adapter->rss_key, rxfh->key, adapter->rss_key_size);
1839 
1840 	if (rxfh->indir) {
1841 		/* Each 32-bit 'indir' word is stored into one LUT entry */
1842 		for (i = 0; i < adapter->rss_lut_size; i++)
1843 			adapter->rss_lut[i] = (u8)(rxfh->indir[i]);
1844 	}
1845 
1846 	return iavf_config_rss(adapter);
1847 }
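
/* Usage note (illustrative), assuming an ethtool recent enough to support the
 * xfrm keyword:
 *	ethtool -X <ifname> hkey <colon-separated key bytes> equal 8
 *	ethtool -X <ifname> xfrm symmetric-xor
 * The first command programs a new key and spreads the indirection table
 * evenly across 8 queues; the second selects the symmetric hash, which the
 * code above maps to VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC and pushes to the PF
 * through the IAVF_FLAG_AQ_SET_RSS_HFUNC adminq request.
 */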
1848 
1849 static const struct ethtool_ops iavf_ethtool_ops = {
1850 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1851 				     ETHTOOL_COALESCE_USE_ADAPTIVE,
1852 	.cap_rss_sym_xor_supported = true,
1853 	.get_drvinfo		= iavf_get_drvinfo,
1854 	.get_link		= ethtool_op_get_link,
1855 	.get_ringparam		= iavf_get_ringparam,
1856 	.set_ringparam		= iavf_set_ringparam,
1857 	.get_strings		= iavf_get_strings,
1858 	.get_ethtool_stats	= iavf_get_ethtool_stats,
1859 	.get_sset_count		= iavf_get_sset_count,
1860 	.get_msglevel		= iavf_get_msglevel,
1861 	.set_msglevel		= iavf_set_msglevel,
1862 	.get_coalesce		= iavf_get_coalesce,
1863 	.set_coalesce		= iavf_set_coalesce,
1864 	.get_per_queue_coalesce = iavf_get_per_queue_coalesce,
1865 	.set_per_queue_coalesce = iavf_set_per_queue_coalesce,
1866 	.set_rxnfc		= iavf_set_rxnfc,
1867 	.get_rxnfc		= iavf_get_rxnfc,
1868 	.get_rxfh_indir_size	= iavf_get_rxfh_indir_size,
1869 	.get_rxfh		= iavf_get_rxfh,
1870 	.set_rxfh		= iavf_set_rxfh,
1871 	.get_channels		= iavf_get_channels,
1872 	.set_channels		= iavf_set_channels,
1873 	.get_rxfh_key_size	= iavf_get_rxfh_key_size,
1874 	.get_link_ksettings	= iavf_get_link_ksettings,
1875 };
1876 
1877 /**
1878  * iavf_set_ethtool_ops - Initialize ethtool ops struct
1879  * @netdev: network interface device structure
1880  *
1881  * Sets ethtool ops struct in our netdev so that ethtool can call
1882  * our functions.
1883  **/
1884 void iavf_set_ethtool_ops(struct net_device *netdev)
1885 {
1886 	netdev->ethtool_ops = &iavf_ethtool_ops;
1887 }
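
/* Usage sketch (illustrative, not this driver's actual probe code): the
 * helper above is intended to be called once, before the netdev is
 * registered, along the lines of
 *
 *	netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter), num_qps);
 *	if (!netdev)
 *		return -ENOMEM;
 *	iavf_set_ethtool_ops(netdev);
 *	err = register_netdev(netdev);
 *
 * where num_qps and the surrounding error handling are placeholders.
 */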
1888