1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4 /* ethtool support for i40e */
5
6 #include "i40e.h"
7 #include "i40e_diag.h"
8 #include "i40e_txrx_common.h"
9
10 /* ethtool statistics helpers */
11
12 /**
13 * struct i40e_stats - definition for an ethtool statistic
14 * @stat_string: statistic name to display in ethtool -S output
15 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
16 * @stat_offset: offsetof() the stat from a base pointer
17 *
18 * This structure defines a statistic to be added to the ethtool stats buffer.
19 * It defines a statistic as offset from a common base pointer. Stats should
20 * be defined in constant arrays using the I40E_STAT macro, with every element
21 * of the array using the same _type for calculating the sizeof_stat and
22 * stat_offset.
23 *
24 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
25 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
26 * the i40e_add_ethtool_stat() helper function.
27 *
28 * The @stat_string is interpreted as a format string, allowing formatted
29 * values to be inserted while looping over multiple structures for a given
30 * statistics array. Thus, every statistic string in an array should have the
31 * same type and number of format specifiers, to be formatted by variadic
32 * arguments to the i40e_add_stat_string() helper function.
33 **/
34 struct i40e_stats {
35 char stat_string[ETH_GSTRING_LEN];
36 int sizeof_stat;
37 int stat_offset;
38 };
39
40 /* Helper macro to define an i40e_stat structure with proper size and type.
41 * Use this when defining constant statistics arrays. Note that @_type expects
42 * only a type name and is used multiple times.
43 */
44 #define I40E_STAT(_type, _name, _stat) { \
45 .stat_string = _name, \
46 .sizeof_stat = sizeof_field(_type, _stat), \
47 .stat_offset = offsetof(_type, _stat) \
48 }
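/* For example (illustrative), I40E_STAT(struct i40e_ring, "%s-%u.packets",
 * stats.packets) produces an entry whose sizeof_stat is
 * sizeof_field(struct i40e_ring, stats.packets) and whose stat_offset is
 * offsetof(struct i40e_ring, stats.packets).
 */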
49
50 /* Helper macro for defining some statistics directly copied from the netdev
51 * stats structure.
52 */
53 #define I40E_NETDEV_STAT(_net_stat) \
54 I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
55
56 /* Helper macro for defining some statistics related to queues */
57 #define I40E_QUEUE_STAT(_name, _stat) \
58 I40E_STAT(struct i40e_ring, _name, _stat)
59
60 /* Stats associated with a Tx or Rx ring */
61 static const struct i40e_stats i40e_gstrings_queue_stats[] = {
62 I40E_QUEUE_STAT("%s-%u.packets", stats.packets),
63 I40E_QUEUE_STAT("%s-%u.bytes", stats.bytes),
64 };
65
66 /**
67 * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer
68 * @data: location to store the stat value
69 * @pointer: basis for where to copy from
70 * @stat: the stat definition
71 *
72 * Copies the stat data defined by the pointer and stat structure pair into
73 * the memory supplied as data. Used to implement i40e_add_ethtool_stats and
74 * i40e_add_queue_stats. If the pointer is null, data will be zero'd.
75 */
76 static void
77 i40e_add_one_ethtool_stat(u64 *data, void *pointer,
78 const struct i40e_stats *stat)
79 {
80 char *p;
81
82 if (!pointer) {
83 /* ensure that the ethtool data buffer is zero'd for any stats
84 * which don't have a valid pointer.
85 */
86 *data = 0;
87 return;
88 }
89
90 p = (char *)pointer + stat->stat_offset;
91 switch (stat->sizeof_stat) {
92 case sizeof(u64):
93 *data = *((u64 *)p);
94 break;
95 case sizeof(u32):
96 *data = *((u32 *)p);
97 break;
98 case sizeof(u16):
99 *data = *((u16 *)p);
100 break;
101 case sizeof(u8):
102 *data = *((u8 *)p);
103 break;
104 default:
105 WARN_ONCE(1, "unexpected stat size for %s",
106 stat->stat_string);
107 *data = 0;
108 }
109 }
110
111 /**
112 * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer
113 * @data: ethtool stats buffer
114 * @pointer: location to copy stats from
115 * @stats: array of stats to copy
116 * @size: the size of the stats definition
117 *
118 * Copy the stats defined by the stats array using the pointer as a base into
119 * the data buffer supplied by ethtool. Updates the data pointer to point to
120 * the next empty location for successive calls to __i40e_add_ethtool_stats.
121 * If pointer is null, set the data values to zero and update the pointer to
122 * skip these stats.
123 **/
124 static void
125 __i40e_add_ethtool_stats(u64 **data, void *pointer,
126 const struct i40e_stats stats[],
127 const unsigned int size)
128 {
129 unsigned int i;
130
131 for (i = 0; i < size; i++)
132 i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]);
133 }
134
135 /**
136 * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer
137 * @data: ethtool stats buffer
138 * @pointer: location where stats are stored
139 * @stats: static const array of stat definitions
140 *
141 * Macro to ease the use of __i40e_add_ethtool_stats by taking a static
142 * constant stats array and passing the ARRAY_SIZE(). This avoids typos by
143 * ensuring that we pass the size associated with the given stats array.
144 *
145 * The parameter @stats is evaluated twice, so parameters with side effects
146 * should be avoided.
147 **/
148 #define i40e_add_ethtool_stats(data, pointer, stats) \
149 __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
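/* Illustrative use (hypothetical variable name): with net_stats pointing at
 * a struct rtnl_link_stats64, the call
 *
 *	i40e_add_ethtool_stats(&data, net_stats, i40e_gstrings_net_stats);
 *
 * copies every entry of i40e_gstrings_net_stats into the ethtool buffer and
 * advances data past them.
 */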
150
151 /**
152 * i40e_add_queue_stats - copy queue statistics into supplied buffer
153 * @data: ethtool stats buffer
154 * @ring: the ring to copy
155 *
156 * Queue statistics must be copied while protected by
157 * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats.
158 * Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the
159 * ring pointer is null, zero out the queue stat values and update the data
160 * pointer. Otherwise safely copy the stats from the ring into the supplied
161 * buffer and update the data pointer when finished.
162 *
163 * This function expects to be called while under rcu_read_lock().
164 **/
165 static void
166 i40e_add_queue_stats(u64 **data, struct i40e_ring *ring)
167 {
168 const unsigned int size = ARRAY_SIZE(i40e_gstrings_queue_stats);
169 const struct i40e_stats *stats = i40e_gstrings_queue_stats;
170 unsigned int start;
171 unsigned int i;
172
173 /* To avoid invalid statistics values, ensure that we keep retrying
174 * the copy until we get a consistent value according to
175 * u64_stats_fetch_retry_irq. But first, make sure our ring is
176 * non-null before attempting to access its syncp.
177 */
178 do {
179 start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp);
180 for (i = 0; i < size; i++) {
181 i40e_add_one_ethtool_stat(&(*data)[i], ring,
182 &stats[i]);
183 }
184 } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start));
185
186 /* Once we successfully copy the stats in, update the data pointer */
187 *data += size;
188 }
189
190 /**
191 * __i40e_add_stat_strings - copy stat strings into ethtool buffer
192 * @p: ethtool supplied buffer
193 * @stats: stat definitions array
194 * @size: size of the stats array
195 *
196 * Format and copy the strings described by stats into the buffer pointed at
197 * by p.
198 **/
199 static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
200 const unsigned int size, ...)
201 {
202 unsigned int i;
203
204 for (i = 0; i < size; i++) {
205 va_list args;
206
207 va_start(args, size);
208 vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
209 *p += ETH_GSTRING_LEN;
210 va_end(args);
211 }
212 }
213
214 /**
215 * i40e_add_stat_strings - copy stat strings into ethtool buffer
216 * @p: ethtool supplied buffer
217 * @stats: stat definitions array
218 *
219 * Format and copy the strings described by the const static stats value into
220 * the buffer pointed at by p.
221 *
222 * The parameter @stats is evaluated twice, so parameters with side effects
223 * should be avoided. Additionally, stats must be an array such that
224 * ARRAY_SIZE can be called on it.
225 **/
226 #define i40e_add_stat_strings(p, stats, ...) \
227 __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
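/* Illustrative use (hypothetical queue index): emit the per-queue strings
 * for transmit queue 0, filling the "%s-%u" specifiers of
 * i40e_gstrings_queue_stats defined above:
 *
 *	i40e_add_stat_strings(&data, i40e_gstrings_queue_stats, "tx", 0);
 */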
228
229 #define I40E_PF_STAT(_name, _stat) \
230 I40E_STAT(struct i40e_pf, _name, _stat)
231 #define I40E_VSI_STAT(_name, _stat) \
232 I40E_STAT(struct i40e_vsi, _name, _stat)
233 #define I40E_VEB_STAT(_name, _stat) \
234 I40E_STAT(struct i40e_veb, _name, _stat)
235 #define I40E_VEB_TC_STAT(_name, _stat) \
236 I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat)
237 #define I40E_PFC_STAT(_name, _stat) \
238 I40E_STAT(struct i40e_pfc_stats, _name, _stat)
239 #define I40E_QUEUE_STAT(_name, _stat) \
240 I40E_STAT(struct i40e_ring, _name, _stat)
241
242 static const struct i40e_stats i40e_gstrings_net_stats[] = {
243 I40E_NETDEV_STAT(rx_packets),
244 I40E_NETDEV_STAT(tx_packets),
245 I40E_NETDEV_STAT(rx_bytes),
246 I40E_NETDEV_STAT(tx_bytes),
247 I40E_NETDEV_STAT(rx_errors),
248 I40E_NETDEV_STAT(tx_errors),
249 I40E_NETDEV_STAT(rx_dropped),
250 I40E_NETDEV_STAT(tx_dropped),
251 I40E_NETDEV_STAT(collisions),
252 I40E_NETDEV_STAT(rx_length_errors),
253 I40E_NETDEV_STAT(rx_crc_errors),
254 };
255
256 static const struct i40e_stats i40e_gstrings_veb_stats[] = {
257 I40E_VEB_STAT("veb.rx_bytes", stats.rx_bytes),
258 I40E_VEB_STAT("veb.tx_bytes", stats.tx_bytes),
259 I40E_VEB_STAT("veb.rx_unicast", stats.rx_unicast),
260 I40E_VEB_STAT("veb.tx_unicast", stats.tx_unicast),
261 I40E_VEB_STAT("veb.rx_multicast", stats.rx_multicast),
262 I40E_VEB_STAT("veb.tx_multicast", stats.tx_multicast),
263 I40E_VEB_STAT("veb.rx_broadcast", stats.rx_broadcast),
264 I40E_VEB_STAT("veb.tx_broadcast", stats.tx_broadcast),
265 I40E_VEB_STAT("veb.rx_discards", stats.rx_discards),
266 I40E_VEB_STAT("veb.tx_discards", stats.tx_discards),
267 I40E_VEB_STAT("veb.tx_errors", stats.tx_errors),
268 I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
269 };
270
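/**
 * struct i40e_cp_veb_tc_stats - flattened copy of one TC's VEB statistics
 * @tc_rx_packets: packets received on this traffic class
 * @tc_rx_bytes: bytes received on this traffic class
 * @tc_tx_packets: packets transmitted on this traffic class
 * @tc_tx_bytes: bytes transmitted on this traffic class
 *
 * Used as the base type for i40e_gstrings_veb_tc_stats below so a single
 * traffic class's counters can be copied out with the generic stat helpers.
 **/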
271 struct i40e_cp_veb_tc_stats {
272 u64 tc_rx_packets;
273 u64 tc_rx_bytes;
274 u64 tc_tx_packets;
275 u64 tc_tx_bytes;
276 };
277
278 static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = {
279 I40E_VEB_TC_STAT("veb.tc_%u_tx_packets", tc_tx_packets),
280 I40E_VEB_TC_STAT("veb.tc_%u_tx_bytes", tc_tx_bytes),
281 I40E_VEB_TC_STAT("veb.tc_%u_rx_packets", tc_rx_packets),
282 I40E_VEB_TC_STAT("veb.tc_%u_rx_bytes", tc_rx_bytes),
283 };
284
285 static const struct i40e_stats i40e_gstrings_misc_stats[] = {
286 I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
287 I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
288 I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
289 I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
290 I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
291 I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
292 I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
293 I40E_VSI_STAT("tx_linearize", tx_linearize),
294 I40E_VSI_STAT("tx_force_wb", tx_force_wb),
295 I40E_VSI_STAT("tx_busy", tx_busy),
296 I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
297 I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
298 };
299
300 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
301 * but they are separate. This device supports Virtualization, and
302 * as such might have several netdevs supporting VMDq and FCoE going
303 * through a single port. The NETDEV_STATs are for individual netdevs
304 * seen at the top of the stack, and the PF_STATs are for the physical
305 * function at the bottom of the stack hosting those netdevs.
306 *
307 * The PF_STATs are appended to the netdev stats only when ethtool -S
308 * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
309 */
310 static const struct i40e_stats i40e_gstrings_stats[] = {
311 I40E_PF_STAT("port.rx_bytes", stats.eth.rx_bytes),
312 I40E_PF_STAT("port.tx_bytes", stats.eth.tx_bytes),
313 I40E_PF_STAT("port.rx_unicast", stats.eth.rx_unicast),
314 I40E_PF_STAT("port.tx_unicast", stats.eth.tx_unicast),
315 I40E_PF_STAT("port.rx_multicast", stats.eth.rx_multicast),
316 I40E_PF_STAT("port.tx_multicast", stats.eth.tx_multicast),
317 I40E_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast),
318 I40E_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast),
319 I40E_PF_STAT("port.tx_errors", stats.eth.tx_errors),
320 I40E_PF_STAT("port.rx_dropped", stats.eth.rx_discards),
321 I40E_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down),
322 I40E_PF_STAT("port.rx_crc_errors", stats.crc_errors),
323 I40E_PF_STAT("port.illegal_bytes", stats.illegal_bytes),
324 I40E_PF_STAT("port.mac_local_faults", stats.mac_local_faults),
325 I40E_PF_STAT("port.mac_remote_faults", stats.mac_remote_faults),
326 I40E_PF_STAT("port.tx_timeout", tx_timeout_count),
327 I40E_PF_STAT("port.rx_csum_bad", hw_csum_rx_error),
328 I40E_PF_STAT("port.rx_length_errors", stats.rx_length_errors),
329 I40E_PF_STAT("port.link_xon_rx", stats.link_xon_rx),
330 I40E_PF_STAT("port.link_xoff_rx", stats.link_xoff_rx),
331 I40E_PF_STAT("port.link_xon_tx", stats.link_xon_tx),
332 I40E_PF_STAT("port.link_xoff_tx", stats.link_xoff_tx),
333 I40E_PF_STAT("port.rx_size_64", stats.rx_size_64),
334 I40E_PF_STAT("port.rx_size_127", stats.rx_size_127),
335 I40E_PF_STAT("port.rx_size_255", stats.rx_size_255),
336 I40E_PF_STAT("port.rx_size_511", stats.rx_size_511),
337 I40E_PF_STAT("port.rx_size_1023", stats.rx_size_1023),
338 I40E_PF_STAT("port.rx_size_1522", stats.rx_size_1522),
339 I40E_PF_STAT("port.rx_size_big", stats.rx_size_big),
340 I40E_PF_STAT("port.tx_size_64", stats.tx_size_64),
341 I40E_PF_STAT("port.tx_size_127", stats.tx_size_127),
342 I40E_PF_STAT("port.tx_size_255", stats.tx_size_255),
343 I40E_PF_STAT("port.tx_size_511", stats.tx_size_511),
344 I40E_PF_STAT("port.tx_size_1023", stats.tx_size_1023),
345 I40E_PF_STAT("port.tx_size_1522", stats.tx_size_1522),
346 I40E_PF_STAT("port.tx_size_big", stats.tx_size_big),
347 I40E_PF_STAT("port.rx_undersize", stats.rx_undersize),
348 I40E_PF_STAT("port.rx_fragments", stats.rx_fragments),
349 I40E_PF_STAT("port.rx_oversize", stats.rx_oversize),
350 I40E_PF_STAT("port.rx_jabber", stats.rx_jabber),
351 I40E_PF_STAT("port.VF_admin_queue_requests", vf_aq_requests),
352 I40E_PF_STAT("port.arq_overflows", arq_overflows),
353 I40E_PF_STAT("port.tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
354 I40E_PF_STAT("port.rx_hwtstamp_cleared", rx_hwtstamp_cleared),
355 I40E_PF_STAT("port.tx_hwtstamp_skipped", tx_hwtstamp_skipped),
356 I40E_PF_STAT("port.fdir_flush_cnt", fd_flush_cnt),
357 I40E_PF_STAT("port.fdir_atr_match", stats.fd_atr_match),
358 I40E_PF_STAT("port.fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
359 I40E_PF_STAT("port.fdir_atr_status", stats.fd_atr_status),
360 I40E_PF_STAT("port.fdir_sb_match", stats.fd_sb_match),
361 I40E_PF_STAT("port.fdir_sb_status", stats.fd_sb_status),
362
363 /* LPI stats */
364 I40E_PF_STAT("port.tx_lpi_status", stats.tx_lpi_status),
365 I40E_PF_STAT("port.rx_lpi_status", stats.rx_lpi_status),
366 I40E_PF_STAT("port.tx_lpi_count", stats.tx_lpi_count),
367 I40E_PF_STAT("port.rx_lpi_count", stats.rx_lpi_count),
368 };
369
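/**
 * struct i40e_pfc_stats - priority flow control counters for one priority
 * @priority_xon_rx: XON frames received for this priority
 * @priority_xoff_rx: XOFF frames received for this priority
 * @priority_xon_tx: XON frames transmitted for this priority
 * @priority_xoff_tx: XOFF frames transmitted for this priority
 * @priority_xon_2_xoff: XON to XOFF transitions for this priority
 *
 * Base type for i40e_gstrings_pfc_stats below; the strings are repeated for
 * each of the I40E_MAX_USER_PRIORITY priorities (see I40E_PFC_STATS_LEN).
 **/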
370 struct i40e_pfc_stats {
371 u64 priority_xon_rx;
372 u64 priority_xoff_rx;
373 u64 priority_xon_tx;
374 u64 priority_xoff_tx;
375 u64 priority_xon_2_xoff;
376 };
377
378 static const struct i40e_stats i40e_gstrings_pfc_stats[] = {
379 I40E_PFC_STAT("port.tx_priority_%u_xon_tx", priority_xon_tx),
380 I40E_PFC_STAT("port.tx_priority_%u_xoff_tx", priority_xoff_tx),
381 I40E_PFC_STAT("port.rx_priority_%u_xon_rx", priority_xon_rx),
382 I40E_PFC_STAT("port.rx_priority_%u_xoff_rx", priority_xoff_rx),
383 I40E_PFC_STAT("port.rx_priority_%u_xon_2_xoff", priority_xon_2_xoff),
384 };
385
386 #define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
387
388 #define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats)
389
390 #define I40E_VSI_STATS_LEN (I40E_NETDEV_STATS_LEN + I40E_MISC_STATS_LEN)
391
392 #define I40E_PFC_STATS_LEN (ARRAY_SIZE(i40e_gstrings_pfc_stats) * \
393 I40E_MAX_USER_PRIORITY)
394
395 #define I40E_VEB_STATS_LEN (ARRAY_SIZE(i40e_gstrings_veb_stats) + \
396 (ARRAY_SIZE(i40e_gstrings_veb_tc_stats) * \
397 I40E_MAX_TRAFFIC_CLASS))
398
399 #define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
400
401 #define I40E_PF_STATS_LEN (I40E_GLOBAL_STATS_LEN + \
402 I40E_PFC_STATS_LEN + \
403 I40E_VEB_STATS_LEN + \
404 I40E_VSI_STATS_LEN)
405
406 /* Length of stats for a single queue */
407 #define I40E_QUEUE_STATS_LEN ARRAY_SIZE(i40e_gstrings_queue_stats)
408
409 enum i40e_ethtool_test_id {
410 I40E_ETH_TEST_REG = 0,
411 I40E_ETH_TEST_EEPROM,
412 I40E_ETH_TEST_INTR,
413 I40E_ETH_TEST_LINK,
414 };
415
416 static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
417 "Register test (offline)",
418 "Eeprom test (offline)",
419 "Interrupt test (offline)",
420 "Link test (on/offline)"
421 };
422
423 #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
424
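/**
 * struct i40e_priv_flags - definition for an ethtool private flag
 * @flag_string: name shown in ethtool --show-priv-flags output
 * @flag: the driver flag bit(s) controlled by this entry
 * @read_only: true if the flag can only be reported, not changed
 **/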
425 struct i40e_priv_flags {
426 char flag_string[ETH_GSTRING_LEN];
427 u64 flag;
428 bool read_only;
429 };
430
431 #define I40E_PRIV_FLAG(_name, _flag, _read_only) { \
432 .flag_string = _name, \
433 .flag = _flag, \
434 .read_only = _read_only, \
435 }
436
437 static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
438 /* NOTE: MFP setting cannot be changed */
439 I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1),
440 I40E_PRIV_FLAG("total-port-shutdown",
441 I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED, 1),
442 I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
443 I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
444 I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
445 I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0),
446 I40E_PRIV_FLAG("link-down-on-close",
447 I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED, 0),
448 I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
449 I40E_PRIV_FLAG("disable-source-pruning",
450 I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
451 I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
452 I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0),
453 I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
454 };
455
456 #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
457
458 /* Private flags with a global effect, restricted to PF 0 */
459 static const struct i40e_priv_flags i40e_gl_gstrings_priv_flags[] = {
460 I40E_PRIV_FLAG("vf-true-promisc-support",
461 I40E_FLAG_TRUE_PROMISC_SUPPORT, 0),
462 };
463
464 #define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_gstrings_priv_flags)
465
466 /**
467 * i40e_partition_setting_complaint - generic complaint for MFP restriction
468 * @pf: the PF struct
469 **/
470 static void i40e_partition_setting_complaint(struct i40e_pf *pf)
471 {
472 dev_info(&pf->pdev->dev,
473 "The link settings are allowed to be changed only from the first partition of a given port. Please switch to the first partition in order to change the setting.\n");
474 }
475
476 /**
477 * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes
478 * @pf: PF struct with phy_types
479 * @ks: ethtool link ksettings struct to fill out
480 *
481 **/
482 static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
483 struct ethtool_link_ksettings *ks)
484 {
485 struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info;
486 u64 phy_types = pf->hw.phy.phy_types;
487
488 ethtool_link_ksettings_zero_link_mode(ks, supported);
489 ethtool_link_ksettings_zero_link_mode(ks, advertising);
490
491 if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
492 ethtool_link_ksettings_add_link_mode(ks, supported,
493 1000baseT_Full);
494 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
495 ethtool_link_ksettings_add_link_mode(ks, advertising,
496 1000baseT_Full);
497 if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) {
498 ethtool_link_ksettings_add_link_mode(ks, supported,
499 100baseT_Full);
500 ethtool_link_ksettings_add_link_mode(ks, advertising,
501 100baseT_Full);
502 }
503 }
504 if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
505 phy_types & I40E_CAP_PHY_TYPE_XFI ||
506 phy_types & I40E_CAP_PHY_TYPE_SFI ||
507 phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
508 phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC) {
509 ethtool_link_ksettings_add_link_mode(ks, supported,
510 10000baseT_Full);
511 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
512 ethtool_link_ksettings_add_link_mode(ks, advertising,
513 10000baseT_Full);
514 }
515 if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_T) {
516 ethtool_link_ksettings_add_link_mode(ks, supported,
517 10000baseT_Full);
518 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
519 ethtool_link_ksettings_add_link_mode(ks, advertising,
520 10000baseT_Full);
521 }
522 if (phy_types & I40E_CAP_PHY_TYPE_2_5GBASE_T) {
523 ethtool_link_ksettings_add_link_mode(ks, supported,
524 2500baseT_Full);
525 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB)
526 ethtool_link_ksettings_add_link_mode(ks, advertising,
527 2500baseT_Full);
528 }
529 if (phy_types & I40E_CAP_PHY_TYPE_5GBASE_T) {
530 ethtool_link_ksettings_add_link_mode(ks, supported,
531 5000baseT_Full);
532 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB)
533 ethtool_link_ksettings_add_link_mode(ks, advertising,
534 5000baseT_Full);
535 }
536 if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
537 phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
538 phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
539 ethtool_link_ksettings_add_link_mode(ks, supported,
540 40000baseCR4_Full);
541 if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
542 phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
543 ethtool_link_ksettings_add_link_mode(ks, supported,
544 40000baseCR4_Full);
545 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_40GB)
546 ethtool_link_ksettings_add_link_mode(ks, advertising,
547 40000baseCR4_Full);
548 }
549 if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
550 ethtool_link_ksettings_add_link_mode(ks, supported,
551 100baseT_Full);
552 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
553 ethtool_link_ksettings_add_link_mode(ks, advertising,
554 100baseT_Full);
555 }
556 if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T) {
557 ethtool_link_ksettings_add_link_mode(ks, supported,
558 1000baseT_Full);
559 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
560 ethtool_link_ksettings_add_link_mode(ks, advertising,
561 1000baseT_Full);
562 }
563 if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) {
564 ethtool_link_ksettings_add_link_mode(ks, supported,
565 40000baseSR4_Full);
566 ethtool_link_ksettings_add_link_mode(ks, advertising,
567 40000baseSR4_Full);
568 }
569 if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4) {
570 ethtool_link_ksettings_add_link_mode(ks, supported,
571 40000baseLR4_Full);
572 ethtool_link_ksettings_add_link_mode(ks, advertising,
573 40000baseLR4_Full);
574 }
575 if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
576 ethtool_link_ksettings_add_link_mode(ks, supported,
577 40000baseKR4_Full);
578 ethtool_link_ksettings_add_link_mode(ks, advertising,
579 40000baseKR4_Full);
580 }
581 if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
582 ethtool_link_ksettings_add_link_mode(ks, supported,
583 20000baseKR2_Full);
584 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_20GB)
585 ethtool_link_ksettings_add_link_mode(ks, advertising,
586 20000baseKR2_Full);
587 }
588 if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
589 ethtool_link_ksettings_add_link_mode(ks, supported,
590 10000baseKX4_Full);
591 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
592 ethtool_link_ksettings_add_link_mode(ks, advertising,
593 10000baseKX4_Full);
594 }
595 if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR &&
596 !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) {
597 ethtool_link_ksettings_add_link_mode(ks, supported,
598 10000baseKR_Full);
599 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
600 ethtool_link_ksettings_add_link_mode(ks, advertising,
601 10000baseKR_Full);
602 }
603 if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX &&
604 !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) {
605 ethtool_link_ksettings_add_link_mode(ks, supported,
606 1000baseKX_Full);
607 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
608 ethtool_link_ksettings_add_link_mode(ks, advertising,
609 1000baseKX_Full);
610 }
611 /* need to add 25G PHY types */
612 if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR) {
613 ethtool_link_ksettings_add_link_mode(ks, supported,
614 25000baseKR_Full);
615 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
616 ethtool_link_ksettings_add_link_mode(ks, advertising,
617 25000baseKR_Full);
618 }
619 if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR) {
620 ethtool_link_ksettings_add_link_mode(ks, supported,
621 25000baseCR_Full);
622 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
623 ethtool_link_ksettings_add_link_mode(ks, advertising,
624 25000baseCR_Full);
625 }
626 if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
627 phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR) {
628 ethtool_link_ksettings_add_link_mode(ks, supported,
629 25000baseSR_Full);
630 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
631 ethtool_link_ksettings_add_link_mode(ks, advertising,
632 25000baseSR_Full);
633 }
634 if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_AOC ||
635 phy_types & I40E_CAP_PHY_TYPE_25GBASE_ACC) {
636 ethtool_link_ksettings_add_link_mode(ks, supported,
637 25000baseCR_Full);
638 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
639 ethtool_link_ksettings_add_link_mode(ks, advertising,
640 25000baseCR_Full);
641 }
642 if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
643 phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
644 phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
645 phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR ||
646 phy_types & I40E_CAP_PHY_TYPE_25GBASE_AOC ||
647 phy_types & I40E_CAP_PHY_TYPE_25GBASE_ACC) {
648 ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
649 ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
650 ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
651 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) {
652 ethtool_link_ksettings_add_link_mode(ks, advertising,
653 FEC_NONE);
654 ethtool_link_ksettings_add_link_mode(ks, advertising,
655 FEC_RS);
656 ethtool_link_ksettings_add_link_mode(ks, advertising,
657 FEC_BASER);
658 }
659 }
660 /* need to add new 10G PHY types */
661 if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
662 phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) {
663 ethtool_link_ksettings_add_link_mode(ks, supported,
664 10000baseCR_Full);
665 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
666 ethtool_link_ksettings_add_link_mode(ks, advertising,
667 10000baseCR_Full);
668 }
669 if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR) {
670 ethtool_link_ksettings_add_link_mode(ks, supported,
671 10000baseSR_Full);
672 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
673 ethtool_link_ksettings_add_link_mode(ks, advertising,
674 10000baseSR_Full);
675 }
676 if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
677 ethtool_link_ksettings_add_link_mode(ks, supported,
678 10000baseLR_Full);
679 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
680 ethtool_link_ksettings_add_link_mode(ks, advertising,
681 10000baseLR_Full);
682 }
683 if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
684 phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
685 phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
686 ethtool_link_ksettings_add_link_mode(ks, supported,
687 1000baseX_Full);
688 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
689 ethtool_link_ksettings_add_link_mode(ks, advertising,
690 1000baseX_Full);
691 }
692 /* Autoneg PHY types */
693 if (phy_types & I40E_CAP_PHY_TYPE_SGMII ||
694 phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4 ||
695 phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
696 phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4 ||
697 phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
698 phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR ||
699 phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
700 phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
701 phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2 ||
702 phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
703 phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR ||
704 phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4 ||
705 phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR ||
706 phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
707 phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
708 phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
709 phy_types & I40E_CAP_PHY_TYPE_5GBASE_T ||
710 phy_types & I40E_CAP_PHY_TYPE_2_5GBASE_T ||
711 phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL ||
712 phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
713 phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
714 phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
715 phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX ||
716 phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
717 ethtool_link_ksettings_add_link_mode(ks, supported,
718 Autoneg);
719 ethtool_link_ksettings_add_link_mode(ks, advertising,
720 Autoneg);
721 }
722 }
723
724 /**
725 * i40e_get_settings_link_up_fec - Get the FEC mode encoding from mask
726 * @req_fec_info: mask request FEC info
727 * @ks: ethtool ksettings to fill in
728 **/
729 static void i40e_get_settings_link_up_fec(u8 req_fec_info,
730 struct ethtool_link_ksettings *ks)
731 {
732 ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
733 ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
734 ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
735
736 if ((I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) &&
737 (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info)) {
738 ethtool_link_ksettings_add_link_mode(ks, advertising,
739 FEC_NONE);
740 ethtool_link_ksettings_add_link_mode(ks, advertising,
741 FEC_BASER);
742 ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
743 } else if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
744 ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
745 } else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) {
746 ethtool_link_ksettings_add_link_mode(ks, advertising,
747 FEC_BASER);
748 } else {
749 ethtool_link_ksettings_add_link_mode(ks, advertising,
750 FEC_NONE);
751 }
752 }
753
754 /**
755 * i40e_get_settings_link_up - Get the Link settings for when link is up
756 * @hw: hw structure
757 * @ks: ethtool ksettings to fill in
758 * @netdev: network interface device structure
759 * @pf: pointer to physical function struct
760 **/
761 static void i40e_get_settings_link_up(struct i40e_hw *hw,
762 struct ethtool_link_ksettings *ks,
763 struct net_device *netdev,
764 struct i40e_pf *pf)
765 {
766 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
767 struct ethtool_link_ksettings cap_ksettings;
768 u32 link_speed = hw_link_info->link_speed;
769
770 /* Initialize supported and advertised settings based on phy settings */
771 switch (hw_link_info->phy_type) {
772 case I40E_PHY_TYPE_40GBASE_CR4:
773 case I40E_PHY_TYPE_40GBASE_CR4_CU:
774 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
775 ethtool_link_ksettings_add_link_mode(ks, supported,
776 40000baseCR4_Full);
777 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
778 ethtool_link_ksettings_add_link_mode(ks, advertising,
779 40000baseCR4_Full);
780 break;
781 case I40E_PHY_TYPE_XLAUI:
782 case I40E_PHY_TYPE_XLPPI:
783 case I40E_PHY_TYPE_40GBASE_AOC:
784 ethtool_link_ksettings_add_link_mode(ks, supported,
785 40000baseCR4_Full);
786 ethtool_link_ksettings_add_link_mode(ks, advertising,
787 40000baseCR4_Full);
788 break;
789 case I40E_PHY_TYPE_40GBASE_SR4:
790 ethtool_link_ksettings_add_link_mode(ks, supported,
791 40000baseSR4_Full);
792 ethtool_link_ksettings_add_link_mode(ks, advertising,
793 40000baseSR4_Full);
794 break;
795 case I40E_PHY_TYPE_40GBASE_LR4:
796 ethtool_link_ksettings_add_link_mode(ks, supported,
797 40000baseLR4_Full);
798 ethtool_link_ksettings_add_link_mode(ks, advertising,
799 40000baseLR4_Full);
800 break;
801 case I40E_PHY_TYPE_25GBASE_SR:
802 case I40E_PHY_TYPE_25GBASE_LR:
803 case I40E_PHY_TYPE_10GBASE_SR:
804 case I40E_PHY_TYPE_10GBASE_LR:
805 case I40E_PHY_TYPE_1000BASE_SX:
806 case I40E_PHY_TYPE_1000BASE_LX:
807 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
808 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
809 ethtool_link_ksettings_add_link_mode(ks, supported,
810 25000baseSR_Full);
811 ethtool_link_ksettings_add_link_mode(ks, advertising,
812 25000baseSR_Full);
813 i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
814 ethtool_link_ksettings_add_link_mode(ks, supported,
815 10000baseSR_Full);
816 ethtool_link_ksettings_add_link_mode(ks, advertising,
817 10000baseSR_Full);
818 ethtool_link_ksettings_add_link_mode(ks, supported,
819 10000baseLR_Full);
820 ethtool_link_ksettings_add_link_mode(ks, advertising,
821 10000baseLR_Full);
822 ethtool_link_ksettings_add_link_mode(ks, supported,
823 1000baseX_Full);
824 ethtool_link_ksettings_add_link_mode(ks, advertising,
825 1000baseX_Full);
826 ethtool_link_ksettings_add_link_mode(ks, supported,
827 10000baseT_Full);
828 if (hw_link_info->module_type[2] &
829 I40E_MODULE_TYPE_1000BASE_SX ||
830 hw_link_info->module_type[2] &
831 I40E_MODULE_TYPE_1000BASE_LX) {
832 ethtool_link_ksettings_add_link_mode(ks, supported,
833 1000baseT_Full);
834 if (hw_link_info->requested_speeds &
835 I40E_LINK_SPEED_1GB)
836 ethtool_link_ksettings_add_link_mode(
837 ks, advertising, 1000baseT_Full);
838 }
839 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
840 ethtool_link_ksettings_add_link_mode(ks, advertising,
841 10000baseT_Full);
842 break;
843 case I40E_PHY_TYPE_10GBASE_T:
844 case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
845 case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
846 case I40E_PHY_TYPE_1000BASE_T:
847 case I40E_PHY_TYPE_100BASE_TX:
848 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
849 ethtool_link_ksettings_add_link_mode(ks, supported,
850 10000baseT_Full);
851 ethtool_link_ksettings_add_link_mode(ks, supported,
852 5000baseT_Full);
853 ethtool_link_ksettings_add_link_mode(ks, supported,
854 2500baseT_Full);
855 ethtool_link_ksettings_add_link_mode(ks, supported,
856 1000baseT_Full);
857 ethtool_link_ksettings_add_link_mode(ks, supported,
858 100baseT_Full);
859 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
860 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
861 ethtool_link_ksettings_add_link_mode(ks, advertising,
862 10000baseT_Full);
863 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB)
864 ethtool_link_ksettings_add_link_mode(ks, advertising,
865 5000baseT_Full);
866 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB)
867 ethtool_link_ksettings_add_link_mode(ks, advertising,
868 2500baseT_Full);
869 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
870 ethtool_link_ksettings_add_link_mode(ks, advertising,
871 1000baseT_Full);
872 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
873 ethtool_link_ksettings_add_link_mode(ks, advertising,
874 100baseT_Full);
875 break;
876 case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
877 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
878 ethtool_link_ksettings_add_link_mode(ks, supported,
879 1000baseT_Full);
880 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
881 ethtool_link_ksettings_add_link_mode(ks, advertising,
882 1000baseT_Full);
883 break;
884 case I40E_PHY_TYPE_10GBASE_CR1_CU:
885 case I40E_PHY_TYPE_10GBASE_CR1:
886 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
887 ethtool_link_ksettings_add_link_mode(ks, supported,
888 10000baseT_Full);
889 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
890 ethtool_link_ksettings_add_link_mode(ks, advertising,
891 10000baseT_Full);
892 break;
893 case I40E_PHY_TYPE_XAUI:
894 case I40E_PHY_TYPE_XFI:
895 case I40E_PHY_TYPE_SFI:
896 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
897 case I40E_PHY_TYPE_10GBASE_AOC:
898 ethtool_link_ksettings_add_link_mode(ks, supported,
899 10000baseT_Full);
900 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
901 ethtool_link_ksettings_add_link_mode(ks, advertising,
902 10000baseT_Full);
903 i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
904 break;
905 case I40E_PHY_TYPE_SGMII:
906 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
907 ethtool_link_ksettings_add_link_mode(ks, supported,
908 1000baseT_Full);
909 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
910 ethtool_link_ksettings_add_link_mode(ks, advertising,
911 1000baseT_Full);
912 if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) {
913 ethtool_link_ksettings_add_link_mode(ks, supported,
914 100baseT_Full);
915 if (hw_link_info->requested_speeds &
916 I40E_LINK_SPEED_100MB)
917 ethtool_link_ksettings_add_link_mode(
918 ks, advertising, 100baseT_Full);
919 }
920 break;
921 case I40E_PHY_TYPE_40GBASE_KR4:
922 case I40E_PHY_TYPE_25GBASE_KR:
923 case I40E_PHY_TYPE_20GBASE_KR2:
924 case I40E_PHY_TYPE_10GBASE_KR:
925 case I40E_PHY_TYPE_10GBASE_KX4:
926 case I40E_PHY_TYPE_1000BASE_KX:
927 ethtool_link_ksettings_add_link_mode(ks, supported,
928 40000baseKR4_Full);
929 ethtool_link_ksettings_add_link_mode(ks, supported,
930 25000baseKR_Full);
931 ethtool_link_ksettings_add_link_mode(ks, supported,
932 20000baseKR2_Full);
933 ethtool_link_ksettings_add_link_mode(ks, supported,
934 10000baseKR_Full);
935 ethtool_link_ksettings_add_link_mode(ks, supported,
936 10000baseKX4_Full);
937 ethtool_link_ksettings_add_link_mode(ks, supported,
938 1000baseKX_Full);
939 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
940 ethtool_link_ksettings_add_link_mode(ks, advertising,
941 40000baseKR4_Full);
942 ethtool_link_ksettings_add_link_mode(ks, advertising,
943 25000baseKR_Full);
944 i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
945 ethtool_link_ksettings_add_link_mode(ks, advertising,
946 20000baseKR2_Full);
947 ethtool_link_ksettings_add_link_mode(ks, advertising,
948 10000baseKR_Full);
949 ethtool_link_ksettings_add_link_mode(ks, advertising,
950 10000baseKX4_Full);
951 ethtool_link_ksettings_add_link_mode(ks, advertising,
952 1000baseKX_Full);
953 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
954 break;
955 case I40E_PHY_TYPE_25GBASE_CR:
956 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
957 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
958 ethtool_link_ksettings_add_link_mode(ks, supported,
959 25000baseCR_Full);
960 ethtool_link_ksettings_add_link_mode(ks, advertising,
961 25000baseCR_Full);
962 i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
963
964 break;
965 case I40E_PHY_TYPE_25GBASE_AOC:
966 case I40E_PHY_TYPE_25GBASE_ACC:
967 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
968 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
969 ethtool_link_ksettings_add_link_mode(ks, supported,
970 25000baseCR_Full);
971 ethtool_link_ksettings_add_link_mode(ks, advertising,
972 25000baseCR_Full);
973 i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
974
975 ethtool_link_ksettings_add_link_mode(ks, supported,
976 10000baseCR_Full);
977 ethtool_link_ksettings_add_link_mode(ks, advertising,
978 10000baseCR_Full);
979 break;
980 default:
981 /* if we got here and link is up something bad is afoot */
982 netdev_info(netdev,
983 "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
984 hw_link_info->phy_type);
985 }
986
987 /* Now that we've worked out everything that could be supported by the
988 * current PHY type, get what is supported by the NVM and intersect
989 * them to get what is truly supported
990 */
991 memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings));
992 i40e_phy_type_to_ethtool(pf, &cap_ksettings);
993 ethtool_intersect_link_masks(ks, &cap_ksettings);
994
995 /* Set speed and duplex */
996 switch (link_speed) {
997 case I40E_LINK_SPEED_40GB:
998 ks->base.speed = SPEED_40000;
999 break;
1000 case I40E_LINK_SPEED_25GB:
1001 ks->base.speed = SPEED_25000;
1002 break;
1003 case I40E_LINK_SPEED_20GB:
1004 ks->base.speed = SPEED_20000;
1005 break;
1006 case I40E_LINK_SPEED_10GB:
1007 ks->base.speed = SPEED_10000;
1008 break;
1009 case I40E_LINK_SPEED_5GB:
1010 ks->base.speed = SPEED_5000;
1011 break;
1012 case I40E_LINK_SPEED_2_5GB:
1013 ks->base.speed = SPEED_2500;
1014 break;
1015 case I40E_LINK_SPEED_1GB:
1016 ks->base.speed = SPEED_1000;
1017 break;
1018 case I40E_LINK_SPEED_100MB:
1019 ks->base.speed = SPEED_100;
1020 break;
1021 default:
1022 ks->base.speed = SPEED_UNKNOWN;
1023 break;
1024 }
1025 ks->base.duplex = DUPLEX_FULL;
1026 }
1027
1028 /**
1029 * i40e_get_settings_link_down - Get the Link settings for when link is down
1030 * @hw: hw structure
1031 * @ks: ethtool ksettings to fill in
1032 * @pf: pointer to physical function struct
1033 *
1034 * Reports link settings that can be determined when link is down
1035 **/
1036 static void i40e_get_settings_link_down(struct i40e_hw *hw,
1037 struct ethtool_link_ksettings *ks,
1038 struct i40e_pf *pf)
1039 {
1040 /* link is down and the driver needs to fall back on
1041 * supported phy types to figure out what info to display
1042 */
1043 i40e_phy_type_to_ethtool(pf, ks);
1044
1045 /* With no link, speed and duplex are unknown */
1046 ks->base.speed = SPEED_UNKNOWN;
1047 ks->base.duplex = DUPLEX_UNKNOWN;
1048 }
1049
1050 /**
1051 * i40e_get_link_ksettings - Get Link Speed and Duplex settings
1052 * @netdev: network interface device structure
1053 * @ks: ethtool ksettings
1054 *
1055 * Reports speed/duplex settings based on media_type
1056 **/
1057 static int i40e_get_link_ksettings(struct net_device *netdev,
1058 struct ethtool_link_ksettings *ks)
1059 {
1060 struct i40e_netdev_priv *np = netdev_priv(netdev);
1061 struct i40e_pf *pf = np->vsi->back;
1062 struct i40e_hw *hw = &pf->hw;
1063 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
1064 bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
1065
1066 ethtool_link_ksettings_zero_link_mode(ks, supported);
1067 ethtool_link_ksettings_zero_link_mode(ks, advertising);
1068
1069 if (link_up)
1070 i40e_get_settings_link_up(hw, ks, netdev, pf);
1071 else
1072 i40e_get_settings_link_down(hw, ks, pf);
1073
1074 /* Now set the settings that don't rely on link being up/down */
1075 /* Set autoneg settings */
1076 ks->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
1077 AUTONEG_ENABLE : AUTONEG_DISABLE);
1078
1079 /* Set media type settings */
1080 switch (hw->phy.media_type) {
1081 case I40E_MEDIA_TYPE_BACKPLANE:
1082 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
1083 ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
1084 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
1085 ethtool_link_ksettings_add_link_mode(ks, advertising,
1086 Backplane);
1087 ks->base.port = PORT_NONE;
1088 break;
1089 case I40E_MEDIA_TYPE_BASET:
1090 ethtool_link_ksettings_add_link_mode(ks, supported, TP);
1091 ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
1092 ks->base.port = PORT_TP;
1093 break;
1094 case I40E_MEDIA_TYPE_DA:
1095 case I40E_MEDIA_TYPE_CX4:
1096 ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
1097 ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
1098 ks->base.port = PORT_DA;
1099 break;
1100 case I40E_MEDIA_TYPE_FIBER:
1101 ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
1102 ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
1103 ks->base.port = PORT_FIBRE;
1104 break;
1105 case I40E_MEDIA_TYPE_UNKNOWN:
1106 default:
1107 ks->base.port = PORT_OTHER;
1108 break;
1109 }
1110
1111 /* Set flow control settings */
1112 ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
1113 ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);
1114
1115 switch (hw->fc.requested_mode) {
1116 case I40E_FC_FULL:
1117 ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
1118 break;
1119 case I40E_FC_TX_PAUSE:
1120 ethtool_link_ksettings_add_link_mode(ks, advertising,
1121 Asym_Pause);
1122 break;
1123 case I40E_FC_RX_PAUSE:
1124 ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
1125 ethtool_link_ksettings_add_link_mode(ks, advertising,
1126 Asym_Pause);
1127 break;
1128 default:
1129 ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
1130 ethtool_link_ksettings_del_link_mode(ks, advertising,
1131 Asym_Pause);
1132 break;
1133 }
1134
1135 return 0;
1136 }
1137
1138 /**
1139 * i40e_set_link_ksettings - Set Speed and Duplex
1140 * @netdev: network interface device structure
1141 * @ks: ethtool ksettings
1142 *
1143 * Set speed/duplex per media_types advertised/forced
1144 **/
1145 static int i40e_set_link_ksettings(struct net_device *netdev,
1146 const struct ethtool_link_ksettings *ks)
1147 {
1148 struct i40e_netdev_priv *np = netdev_priv(netdev);
1149 struct i40e_aq_get_phy_abilities_resp abilities;
1150 struct ethtool_link_ksettings safe_ks;
1151 struct ethtool_link_ksettings copy_ks;
1152 struct i40e_aq_set_phy_config config;
1153 struct i40e_pf *pf = np->vsi->back;
1154 struct i40e_vsi *vsi = np->vsi;
1155 struct i40e_hw *hw = &pf->hw;
1156 bool autoneg_changed = false;
1157 i40e_status status = 0;
1158 int timeout = 50;
1159 int err = 0;
1160 u8 autoneg;
1161
1162 /* Changing port settings is not supported if this isn't the
1163 * port's controlling PF
1164 */
1165 if (hw->partition_id != 1) {
1166 i40e_partition_setting_complaint(pf);
1167 return -EOPNOTSUPP;
1168 }
1169 if (vsi != pf->vsi[pf->lan_vsi])
1170 return -EOPNOTSUPP;
1171 if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
1172 hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
1173 hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
1174 hw->phy.media_type != I40E_MEDIA_TYPE_DA &&
1175 hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
1176 return -EOPNOTSUPP;
1177 if (hw->device_id == I40E_DEV_ID_KX_B ||
1178 hw->device_id == I40E_DEV_ID_KX_C ||
1179 hw->device_id == I40E_DEV_ID_20G_KR2 ||
1180 hw->device_id == I40E_DEV_ID_20G_KR2_A ||
1181 hw->device_id == I40E_DEV_ID_25G_B ||
1182 hw->device_id == I40E_DEV_ID_KX_X722) {
1183 netdev_info(netdev, "Changing settings is not supported on backplane.\n");
1184 return -EOPNOTSUPP;
1185 }
1186
1187 /* copy the ksettings to copy_ks to avoid modifying the original */
1188 memcpy(&copy_ks, ks, sizeof(struct ethtool_link_ksettings));
1189
1190 /* save autoneg out of ksettings */
1191 autoneg = copy_ks.base.autoneg;
1192
1193 /* get our own copy of the bits to check against */
1194 memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings));
1195 safe_ks.base.cmd = copy_ks.base.cmd;
1196 safe_ks.base.link_mode_masks_nwords =
1197 copy_ks.base.link_mode_masks_nwords;
1198 i40e_get_link_ksettings(netdev, &safe_ks);
1199
1200 /* Get link modes supported by hardware and check against modes
1201 * requested by the user. Return an error if unsupported mode was set.
1202 */
1203 if (!bitmap_subset(copy_ks.link_modes.advertising,
1204 safe_ks.link_modes.supported,
1205 __ETHTOOL_LINK_MODE_MASK_NBITS))
1206 return -EINVAL;
1207
1208 /* set autoneg back to what it currently is */
1209 copy_ks.base.autoneg = safe_ks.base.autoneg;
1210
1211 /* If copy_ks.base and safe_ks.base are not the same now, then they are
1212 * trying to set something that we do not support.
1213 */
1214 if (memcmp(&copy_ks.base, &safe_ks.base,
1215 sizeof(struct ethtool_link_settings)))
1216 return -EOPNOTSUPP;
1217
1218 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
1219 timeout--;
1220 if (!timeout)
1221 return -EBUSY;
1222 usleep_range(1000, 2000);
1223 }
1224
1225 /* Get the current phy config */
1226 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
1227 NULL);
1228 if (status) {
1229 err = -EAGAIN;
1230 goto done;
1231 }
1232
1233 /* Copy abilities to config in case autoneg is not
1234 * set below
1235 */
1236 memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
1237 config.abilities = abilities.abilities;
1238
1239 /* Check autoneg */
1240 if (autoneg == AUTONEG_ENABLE) {
1241 /* If autoneg was not already enabled */
1242 if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
1243 /* If autoneg is not supported, return error */
1244 if (!ethtool_link_ksettings_test_link_mode(&safe_ks,
1245 supported,
1246 Autoneg)) {
1247 netdev_info(netdev, "Autoneg not supported on this phy\n");
1248 err = -EINVAL;
1249 goto done;
1250 }
1251 /* Autoneg is allowed to change */
1252 config.abilities = abilities.abilities |
1253 I40E_AQ_PHY_ENABLE_AN;
1254 autoneg_changed = true;
1255 }
1256 } else {
1257 /* If autoneg is currently enabled */
1258 if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
1259 /* If autoneg is supported, 10GBASE_T is the only PHY
1260 * that can disable it; return an error for any other PHY
1261 */
1262 if (ethtool_link_ksettings_test_link_mode(&safe_ks,
1263 supported,
1264 Autoneg) &&
1265 hw->phy.link_info.phy_type !=
1266 I40E_PHY_TYPE_10GBASE_T) {
1267 netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
1268 err = -EINVAL;
1269 goto done;
1270 }
1271 /* Autoneg is allowed to change */
1272 config.abilities = abilities.abilities &
1273 ~I40E_AQ_PHY_ENABLE_AN;
1274 autoneg_changed = true;
1275 }
1276 }
1277
1278 if (ethtool_link_ksettings_test_link_mode(ks, advertising,
1279 100baseT_Full))
1280 config.link_speed |= I40E_LINK_SPEED_100MB;
1281 if (ethtool_link_ksettings_test_link_mode(ks, advertising,
1282 1000baseT_Full) ||
1283 ethtool_link_ksettings_test_link_mode(ks, advertising,
1284 1000baseX_Full) ||
1285 ethtool_link_ksettings_test_link_mode(ks, advertising,
1286 1000baseKX_Full))
1287 config.link_speed |= I40E_LINK_SPEED_1GB;
1288 if (ethtool_link_ksettings_test_link_mode(ks, advertising,
1289 10000baseT_Full) ||
1290 ethtool_link_ksettings_test_link_mode(ks, advertising,
1291 10000baseKX4_Full) ||
1292 ethtool_link_ksettings_test_link_mode(ks, advertising,
1293 10000baseKR_Full) ||
1294 ethtool_link_ksettings_test_link_mode(ks, advertising,
1295 10000baseCR_Full) ||
1296 ethtool_link_ksettings_test_link_mode(ks, advertising,
1297 10000baseSR_Full) ||
1298 ethtool_link_ksettings_test_link_mode(ks, advertising,
1299 10000baseLR_Full))
1300 config.link_speed |= I40E_LINK_SPEED_10GB;
1301 if (ethtool_link_ksettings_test_link_mode(ks, advertising,
1302 2500baseT_Full))
1303 config.link_speed |= I40E_LINK_SPEED_2_5GB;
1304 if (ethtool_link_ksettings_test_link_mode(ks, advertising,
1305 5000baseT_Full))
1306 config.link_speed |= I40E_LINK_SPEED_5GB;
1307 if (ethtool_link_ksettings_test_link_mode(ks, advertising,
1308 20000baseKR2_Full))
1309 config.link_speed |= I40E_LINK_SPEED_20GB;
1310 if (ethtool_link_ksettings_test_link_mode(ks, advertising,
1311 25000baseCR_Full) ||
1312 ethtool_link_ksettings_test_link_mode(ks, advertising,
1313 25000baseKR_Full) ||
1314 ethtool_link_ksettings_test_link_mode(ks, advertising,
1315 25000baseSR_Full))
1316 config.link_speed |= I40E_LINK_SPEED_25GB;
1317 if (ethtool_link_ksettings_test_link_mode(ks, advertising,
1318 40000baseKR4_Full) ||
1319 ethtool_link_ksettings_test_link_mode(ks, advertising,
1320 40000baseCR4_Full) ||
1321 ethtool_link_ksettings_test_link_mode(ks, advertising,
1322 40000baseSR4_Full) ||
1323 ethtool_link_ksettings_test_link_mode(ks, advertising,
1324 40000baseLR4_Full))
1325 config.link_speed |= I40E_LINK_SPEED_40GB;
1326
1327 /* If speed didn't get set, set it to what it currently is.
1328 * This is needed because if advertise is 0 (as it is when autoneg
1329 * is disabled) then speed won't get set.
1330 */
1331 if (!config.link_speed)
1332 config.link_speed = abilities.link_speed;
1333 if (autoneg_changed || abilities.link_speed != config.link_speed) {
1334 /* copy over the rest of the abilities */
1335 config.phy_type = abilities.phy_type;
1336 config.phy_type_ext = abilities.phy_type_ext;
1337 config.eee_capability = abilities.eee_capability;
1338 config.eeer = abilities.eeer_val;
1339 config.low_power_ctrl = abilities.d3_lpan;
1340 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
1341 I40E_AQ_PHY_FEC_CONFIG_MASK;
1342
1343 /* save the requested speeds */
1344 hw->phy.link_info.requested_speeds = config.link_speed;
1345 /* set link and auto negotiation so changes take effect */
1346 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1347 /* If link is up put link down */
1348 if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) {
1349 /* Tell the OS the link is going down; it will come
1350 * back up asynchronously once the FW reports it is ready
1351 */
1352 i40e_print_link_message(vsi, false);
1353 netif_carrier_off(netdev);
1354 netif_tx_stop_all_queues(netdev);
1355 }
1356
1357 /* make the aq call */
1358 status = i40e_aq_set_phy_config(hw, &config, NULL);
1359 if (status) {
1360 netdev_info(netdev,
1361 "Set phy config failed, err %s aq_err %s\n",
1362 i40e_stat_str(hw, status),
1363 i40e_aq_str(hw, hw->aq.asq_last_status));
1364 err = -EAGAIN;
1365 goto done;
1366 }
1367
1368 status = i40e_update_link_info(hw);
1369 if (status)
1370 netdev_dbg(netdev,
1371 "Updating link info failed with err %s aq_err %s\n",
1372 i40e_stat_str(hw, status),
1373 i40e_aq_str(hw, hw->aq.asq_last_status));
1374
1375 } else {
1376 netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
1377 }
1378
1379 done:
1380 clear_bit(__I40E_CONFIG_BUSY, pf->state);
1381
1382 return err;
1383 }
1384
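/**
 * i40e_set_fec_cfg - apply a requested FEC configuration to the PHY
 * @netdev: network interface device structure
 * @fec_cfg: requested FEC configuration bits (I40E_AQ_SET_FEC_*)
 *
 * Mirrors the request into the driver flags, reads the current PHY
 * capabilities and, if the FEC configuration differs, writes a new PHY
 * config with the requested FEC bits and refreshes the link info.
 * Returns 0 on success or -EAGAIN if an admin queue command fails.
 **/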
1385 static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
1386 {
1387 struct i40e_netdev_priv *np = netdev_priv(netdev);
1388 struct i40e_aq_get_phy_abilities_resp abilities;
1389 struct i40e_pf *pf = np->vsi->back;
1390 struct i40e_hw *hw = &pf->hw;
1391 i40e_status status = 0;
1392 u32 flags = 0;
1393 int err = 0;
1394
1395 flags = READ_ONCE(pf->flags);
1396 i40e_set_fec_in_flags(fec_cfg, &flags);
1397
1398 /* Get the current phy config */
1399 memset(&abilities, 0, sizeof(abilities));
1400 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
1401 NULL);
1402 if (status) {
1403 err = -EAGAIN;
1404 goto done;
1405 }
1406
1407 if (abilities.fec_cfg_curr_mod_ext_info != fec_cfg) {
1408 struct i40e_aq_set_phy_config config;
1409
1410 memset(&config, 0, sizeof(config));
1411 config.phy_type = abilities.phy_type;
1412 config.abilities = abilities.abilities |
1413 I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1414 config.phy_type_ext = abilities.phy_type_ext;
1415 config.link_speed = abilities.link_speed;
1416 config.eee_capability = abilities.eee_capability;
1417 config.eeer = abilities.eeer_val;
1418 config.low_power_ctrl = abilities.d3_lpan;
1419 config.fec_config = fec_cfg & I40E_AQ_PHY_FEC_CONFIG_MASK;
1420 status = i40e_aq_set_phy_config(hw, &config, NULL);
1421 if (status) {
1422 netdev_info(netdev,
1423 "Set phy config failed, err %s aq_err %s\n",
1424 i40e_stat_str(hw, status),
1425 i40e_aq_str(hw, hw->aq.asq_last_status));
1426 err = -EAGAIN;
1427 goto done;
1428 }
1429 pf->flags = flags;
1430 status = i40e_update_link_info(hw);
1431 if (status)
1432 /* debug level message only due to relation to the link
1433 * itself rather than to the FEC settings
1434 * (e.g. no physical connection etc.)
1435 */
1436 netdev_dbg(netdev,
1437 "Updating link info failed with err %s aq_err %s\n",
1438 i40e_stat_str(hw, status),
1439 i40e_aq_str(hw, hw->aq.asq_last_status));
1440 }
1441
1442 done:
1443 return err;
1444 }
1445
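/**
 * i40e_get_fec_param - report requested and active FEC modes to ethtool
 * @netdev: network interface device structure
 * @fecparam: ethtool structure to fill in
 *
 * Translates the FEC request stored in the PHY capabilities into
 * ETHTOOL_FEC_* flags and reports the currently negotiated FEC mode from
 * the link info. Returns 0 on success or -EAGAIN on admin queue failure.
 **/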
1446 static int i40e_get_fec_param(struct net_device *netdev,
1447 struct ethtool_fecparam *fecparam)
1448 {
1449 struct i40e_netdev_priv *np = netdev_priv(netdev);
1450 struct i40e_aq_get_phy_abilities_resp abilities;
1451 struct i40e_pf *pf = np->vsi->back;
1452 struct i40e_hw *hw = &pf->hw;
1453 i40e_status status = 0;
1454 int err = 0;
1455 u8 fec_cfg;
1456
1457 /* Get the current phy config */
1458 memset(&abilities, 0, sizeof(abilities));
1459 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
1460 NULL);
1461 if (status) {
1462 err = -EAGAIN;
1463 goto done;
1464 }
1465
1466 fecparam->fec = 0;
1467 fec_cfg = abilities.fec_cfg_curr_mod_ext_info;
1468 if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
1469 fecparam->fec |= ETHTOOL_FEC_AUTO;
1470 else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_RS |
1471 I40E_AQ_SET_FEC_ABILITY_RS))
1472 fecparam->fec |= ETHTOOL_FEC_RS;
1473 else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_KR |
1474 I40E_AQ_SET_FEC_ABILITY_KR))
1475 fecparam->fec |= ETHTOOL_FEC_BASER;
1476 if (fec_cfg == 0)
1477 fecparam->fec |= ETHTOOL_FEC_OFF;
1478
1479 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
1480 fecparam->active_fec = ETHTOOL_FEC_BASER;
1481 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
1482 fecparam->active_fec = ETHTOOL_FEC_RS;
1483 else
1484 fecparam->active_fec = ETHTOOL_FEC_OFF;
1485 done:
1486 return err;
1487 }
1488
1489 static int i40e_set_fec_param(struct net_device *netdev,
1490 struct ethtool_fecparam *fecparam)
1491 {
1492 struct i40e_netdev_priv *np = netdev_priv(netdev);
1493 struct i40e_pf *pf = np->vsi->back;
1494 struct i40e_hw *hw = &pf->hw;
1495 u8 fec_cfg = 0;
1496
1497 if (hw->device_id != I40E_DEV_ID_25G_SFP28 &&
1498 hw->device_id != I40E_DEV_ID_25G_B &&
1499 hw->device_id != I40E_DEV_ID_KX_X722)
1500 return -EPERM;
1501
1502 if (hw->mac.type == I40E_MAC_X722 &&
1503 !(hw->flags & I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE)) {
1504 netdev_err(netdev, "Setting FEC encoding not supported by firmware. Please update the NVM image.\n");
1505 return -EOPNOTSUPP;
1506 }
1507
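 /* Map the ethtool request onto the AQ FEC config bits below; for example,
 * an `ethtool --set-fec <dev> encoding rs` request typically arrives here
 * as ETHTOOL_FEC_RS.
 */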
1508 switch (fecparam->fec) {
1509 case ETHTOOL_FEC_AUTO:
1510 fec_cfg = I40E_AQ_SET_FEC_AUTO;
1511 break;
1512 case ETHTOOL_FEC_RS:
1513 fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS |
1514 I40E_AQ_SET_FEC_ABILITY_RS);
1515 break;
1516 case ETHTOOL_FEC_BASER:
1517 fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR |
1518 I40E_AQ_SET_FEC_ABILITY_KR);
1519 break;
1520 case ETHTOOL_FEC_OFF:
1521 case ETHTOOL_FEC_NONE:
1522 fec_cfg = 0;
1523 break;
1524 default:
1525 dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d",
1526 fecparam->fec);
1527 return -EINVAL;
1528 }
1529
1530 return i40e_set_fec_cfg(netdev, fec_cfg);
1531 }
1532
1533 static int i40e_nway_reset(struct net_device *netdev)
1534 {
1535 /* restart autonegotiation */
1536 struct i40e_netdev_priv *np = netdev_priv(netdev);
1537 struct i40e_pf *pf = np->vsi->back;
1538 struct i40e_hw *hw = &pf->hw;
1539 bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
1540 i40e_status ret = 0;
1541
1542 ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
1543 if (ret) {
1544 netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
1545 i40e_stat_str(hw, ret),
1546 i40e_aq_str(hw, hw->aq.asq_last_status));
1547 return -EIO;
1548 }
1549
1550 return 0;
1551 }
1552
1553 /**
1554 * i40e_get_pauseparam - Get Flow Control status
1555 * @netdev: netdevice structure
1556 * @pause: buffer to return pause parameters
1557 *
1558 * Return tx/rx-pause status
1559 **/
1560 static void i40e_get_pauseparam(struct net_device *netdev,
1561 struct ethtool_pauseparam *pause)
1562 {
1563 struct i40e_netdev_priv *np = netdev_priv(netdev);
1564 struct i40e_pf *pf = np->vsi->back;
1565 struct i40e_hw *hw = &pf->hw;
1566 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
1567 struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
1568
1569 pause->autoneg =
1570 ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
1571 AUTONEG_ENABLE : AUTONEG_DISABLE);
1572
1573 /* PFC enabled so report LFC as off */
1574 if (dcbx_cfg->pfc.pfcenable) {
1575 pause->rx_pause = 0;
1576 pause->tx_pause = 0;
1577 return;
1578 }
1579
1580 if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
1581 pause->rx_pause = 1;
1582 } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
1583 pause->tx_pause = 1;
1584 } else if (hw->fc.current_mode == I40E_FC_FULL) {
1585 pause->rx_pause = 1;
1586 pause->tx_pause = 1;
1587 }
1588 }
1589
1590 /**
1591 * i40e_set_pauseparam - Set Flow Control parameter
1592 * @netdev: network interface device structure
1593 * @pause: the requested tx/rx flow control parameters
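 *
 * Typically reached via `ethtool -A <dev> [autoneg on|off] [rx on|off] [tx on|off]`.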
1594 **/
1595 static int i40e_set_pauseparam(struct net_device *netdev,
1596 struct ethtool_pauseparam *pause)
1597 {
1598 struct i40e_netdev_priv *np = netdev_priv(netdev);
1599 struct i40e_pf *pf = np->vsi->back;
1600 struct i40e_vsi *vsi = np->vsi;
1601 struct i40e_hw *hw = &pf->hw;
1602 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
1603 struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
1604 bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
1605 i40e_status status;
1606 u8 aq_failures;
1607 int err = 0;
1608 u32 is_an;
1609
1610 /* Changing the port's flow control is not supported if this isn't the
1611 * port's controlling PF
1612 */
1613 if (hw->partition_id != 1) {
1614 i40e_partition_setting_complaint(pf);
1615 return -EOPNOTSUPP;
1616 }
1617
1618 if (vsi != pf->vsi[pf->lan_vsi])
1619 return -EOPNOTSUPP;
1620
1621 is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED;
1622 if (pause->autoneg != is_an) {
1623 netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
1624 return -EOPNOTSUPP;
1625 }
1626
1627 /* If we have link and don't have autoneg */
1628 if (!test_bit(__I40E_DOWN, pf->state) && !is_an) {
1629 /* Send a message that the change might not take effect */
1630 netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
1631 }
1632
1633 if (dcbx_cfg->pfc.pfcenable) {
1634 netdev_info(netdev,
1635 "Priority flow control enabled. Cannot set link flow control.\n");
1636 return -EOPNOTSUPP;
1637 }
1638
1639 if (pause->rx_pause && pause->tx_pause)
1640 hw->fc.requested_mode = I40E_FC_FULL;
1641 else if (pause->rx_pause && !pause->tx_pause)
1642 hw->fc.requested_mode = I40E_FC_RX_PAUSE;
1643 else if (!pause->rx_pause && pause->tx_pause)
1644 hw->fc.requested_mode = I40E_FC_TX_PAUSE;
1645 else if (!pause->rx_pause && !pause->tx_pause)
1646 hw->fc.requested_mode = I40E_FC_NONE;
1647 else
1648 return -EINVAL;
1649
1650 /* Tell the OS the link is going down; it will come back up asynchronously
1651 * when the fw says it is ready
1652 */
1653 i40e_print_link_message(vsi, false);
1654 netif_carrier_off(netdev);
1655 netif_tx_stop_all_queues(netdev);
1656
1657 /* Set the fc mode and only restart autoneg if link is up */
1658 status = i40e_set_fc(hw, &aq_failures, link_up);
1659
1660 if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
1661 netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
1662 i40e_stat_str(hw, status),
1663 i40e_aq_str(hw, hw->aq.asq_last_status));
1664 err = -EAGAIN;
1665 }
1666 if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
1667 netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
1668 i40e_stat_str(hw, status),
1669 i40e_aq_str(hw, hw->aq.asq_last_status));
1670 err = -EAGAIN;
1671 }
1672 if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
1673 netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
1674 i40e_stat_str(hw, status),
1675 i40e_aq_str(hw, hw->aq.asq_last_status));
1676 err = -EAGAIN;
1677 }
1678
1679 if (!test_bit(__I40E_DOWN, pf->state) && is_an) {
1680 /* Give it a little more time to try to come back */
1681 msleep(75);
1682 if (!test_bit(__I40E_DOWN, pf->state))
1683 return i40e_nway_reset(netdev);
1684 }
1685
1686 return err;
1687 }
1688
1689 static u32 i40e_get_msglevel(struct net_device *netdev)
1690 {
1691 struct i40e_netdev_priv *np = netdev_priv(netdev);
1692 struct i40e_pf *pf = np->vsi->back;
1693 u32 debug_mask = pf->hw.debug_mask;
1694
1695 if (debug_mask)
1696 netdev_info(netdev, "i40e debug_mask: 0x%08X\n", debug_mask);
1697
1698 return pf->msg_enable;
1699 }
1700
1701 static void i40e_set_msglevel(struct net_device *netdev, u32 data)
1702 {
1703 struct i40e_netdev_priv *np = netdev_priv(netdev);
1704 struct i40e_pf *pf = np->vsi->back;
1705
1706 if (I40E_DEBUG_USER & data)
1707 pf->hw.debug_mask = data;
1708 else
1709 pf->msg_enable = data;
1710 }
1711
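/* Register dump support (`ethtool -d <dev>`): i40e_get_regs_len() reports the
 * required buffer size and i40e_get_regs() fills it from the diag register
 * table.
 */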
1712 static int i40e_get_regs_len(struct net_device *netdev)
1713 {
1714 int reg_count = 0;
1715 int i;
1716
1717 for (i = 0; i40e_reg_list[i].offset != 0; i++)
1718 reg_count += i40e_reg_list[i].elements;
1719
1720 return reg_count * sizeof(u32);
1721 }
1722
1723 static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1724 void *p)
1725 {
1726 struct i40e_netdev_priv *np = netdev_priv(netdev);
1727 struct i40e_pf *pf = np->vsi->back;
1728 struct i40e_hw *hw = &pf->hw;
1729 u32 *reg_buf = p;
1730 unsigned int i, j, ri;
1731 u32 reg;
1732
1733 /* Tell ethtool which driver-version-specific regs output we have.
1734 *
1735 * At some point, if we have ethtool doing special formatting of
1736 * this data, it will rely on this version number to know how to
1737 * interpret things. Hence, this needs to be updated if/when the
1738 * diags register table is changed.
1739 */
1740 regs->version = 1;
1741
1742 /* loop through the diags reg table for what to print */
1743 ri = 0;
1744 for (i = 0; i40e_reg_list[i].offset != 0; i++) {
1745 for (j = 0; j < i40e_reg_list[i].elements; j++) {
1746 reg = i40e_reg_list[i].offset
1747 + (j * i40e_reg_list[i].stride);
1748 reg_buf[ri++] = rd32(hw, reg);
1749 }
1750 }
1751
1752 }
1753
1754 static int i40e_get_eeprom(struct net_device *netdev,
1755 struct ethtool_eeprom *eeprom, u8 *bytes)
1756 {
1757 struct i40e_netdev_priv *np = netdev_priv(netdev);
1758 struct i40e_hw *hw = &np->vsi->back->hw;
1759 struct i40e_pf *pf = np->vsi->back;
1760 int ret_val = 0, len, offset;
1761 u8 *eeprom_buff;
1762 u16 i, sectors;
1763 bool last;
1764 u32 magic;
1765
1766 #define I40E_NVM_SECTOR_SIZE 4096
1767 if (eeprom->len == 0)
1768 return -EINVAL;
1769
1770 /* check for NVMUpdate access method */
1771 magic = hw->vendor_id | (hw->device_id << 16);
1772 if (eeprom->magic && eeprom->magic != magic) {
1773 struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom;
1774 int errno = 0;
1775
1776 /* make sure it is the right magic for NVMUpdate */
1777 if ((eeprom->magic >> 16) != hw->device_id)
1778 errno = -EINVAL;
1779 else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
1780 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
1781 errno = -EBUSY;
1782 else
1783 ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
1784
1785 if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM))
1786 dev_info(&pf->pdev->dev,
1787 "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
1788 ret_val, hw->aq.asq_last_status, errno,
1789 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
1790 cmd->offset, cmd->data_size);
1791
1792 return errno;
1793 }
1794
1795 /* normal ethtool get_eeprom support */
1796 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
1797
1798 eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
1799 if (!eeprom_buff)
1800 return -ENOMEM;
1801
1802 ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
1803 if (ret_val) {
1804 dev_info(&pf->pdev->dev,
1805 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
1806 ret_val, hw->aq.asq_last_status);
1807 goto free_buff;
1808 }
1809
1810 sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
1811 sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
1812 len = I40E_NVM_SECTOR_SIZE;
1813 last = false;
1814 for (i = 0; i < sectors; i++) {
1815 if (i == (sectors - 1)) {
1816 len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
1817 last = true;
1818 }
1819 offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
1820 ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
1821 (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
1822 last, NULL);
1823 if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
1824 dev_info(&pf->pdev->dev,
1825 "read NVM failed, invalid offset 0x%x\n",
1826 offset);
1827 break;
1828 } else if (ret_val &&
1829 hw->aq.asq_last_status == I40E_AQ_RC_EACCES) {
1830 dev_info(&pf->pdev->dev,
1831 "read NVM failed, access, offset 0x%x\n",
1832 offset);
1833 break;
1834 } else if (ret_val) {
1835 dev_info(&pf->pdev->dev,
1836 "read NVM failed offset %d err=%d status=0x%x\n",
1837 offset, ret_val, hw->aq.asq_last_status);
1838 break;
1839 }
1840 }
1841
1842 i40e_release_nvm(hw);
1843 memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
1844 free_buff:
1845 kfree(eeprom_buff);
1846 return ret_val;
1847 }
1848
1849 static int i40e_get_eeprom_len(struct net_device *netdev)
1850 {
1851 struct i40e_netdev_priv *np = netdev_priv(netdev);
1852 struct i40e_hw *hw = &np->vsi->back->hw;
1853 u32 val;
1854
1855 #define X722_EEPROM_SCOPE_LIMIT 0x5B9FFF
1856 if (hw->mac.type == I40E_MAC_X722) {
1857 val = X722_EEPROM_SCOPE_LIMIT + 1;
1858 return val;
1859 }
1860 val = (rd32(hw, I40E_GLPCI_LBARCTRL)
1861 & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
1862 >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
1863 /* register returns value in power of 2, 64Kbyte chunks. */
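 /* e.g. a field value of 3 yields 64 KB * BIT(3) = 512 KB */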
1864 val = (64 * 1024) * BIT(val);
1865 return val;
1866 }
1867
1868 static int i40e_set_eeprom(struct net_device *netdev,
1869 struct ethtool_eeprom *eeprom, u8 *bytes)
1870 {
1871 struct i40e_netdev_priv *np = netdev_priv(netdev);
1872 struct i40e_hw *hw = &np->vsi->back->hw;
1873 struct i40e_pf *pf = np->vsi->back;
1874 struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom;
1875 int ret_val = 0;
1876 int errno = 0;
1877 u32 magic;
1878
1879 /* normal ethtool set_eeprom is not supported */
1880 magic = hw->vendor_id | (hw->device_id << 16);
1881 if (eeprom->magic == magic)
1882 errno = -EOPNOTSUPP;
1883 /* check for NVMUpdate access method */
1884 else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
1885 errno = -EINVAL;
1886 else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
1887 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
1888 errno = -EBUSY;
1889 else
1890 ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
1891
1892 if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM))
1893 dev_info(&pf->pdev->dev,
1894 "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
1895 ret_val, hw->aq.asq_last_status, errno,
1896 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
1897 cmd->offset, cmd->data_size);
1898
1899 return errno;
1900 }
1901
1902 static void i40e_get_drvinfo(struct net_device *netdev,
1903 struct ethtool_drvinfo *drvinfo)
1904 {
1905 struct i40e_netdev_priv *np = netdev_priv(netdev);
1906 struct i40e_vsi *vsi = np->vsi;
1907 struct i40e_pf *pf = vsi->back;
1908
1909 strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
1910 strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
1911 sizeof(drvinfo->fw_version));
1912 strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
1913 sizeof(drvinfo->bus_info));
1914 drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
1915 if (pf->hw.pf_id == 0)
1916 drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN;
1917 }
1918
1919 static void i40e_get_ringparam(struct net_device *netdev,
1920 struct ethtool_ringparam *ring)
1921 {
1922 struct i40e_netdev_priv *np = netdev_priv(netdev);
1923 struct i40e_pf *pf = np->vsi->back;
1924 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
1925
1926 ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
1927 ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
1928 ring->rx_mini_max_pending = 0;
1929 ring->rx_jumbo_max_pending = 0;
1930 ring->rx_pending = vsi->rx_rings[0]->count;
1931 ring->tx_pending = vsi->tx_rings[0]->count;
1932 ring->rx_mini_pending = 0;
1933 ring->rx_jumbo_pending = 0;
1934 }
1935
1936 static bool i40e_active_tx_ring_index(struct i40e_vsi *vsi, u16 index)
1937 {
1938 if (i40e_enabled_xdp_vsi(vsi)) {
1939 return index < vsi->num_queue_pairs ||
1940 (index >= vsi->alloc_queue_pairs &&
1941 index < vsi->alloc_queue_pairs + vsi->num_queue_pairs);
1942 }
1943
1944 return index < vsi->num_queue_pairs;
1945 }
1946
1947 static int i40e_set_ringparam(struct net_device *netdev,
1948 struct ethtool_ringparam *ring)
1949 {
1950 struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
1951 struct i40e_netdev_priv *np = netdev_priv(netdev);
1952 struct i40e_hw *hw = &np->vsi->back->hw;
1953 struct i40e_vsi *vsi = np->vsi;
1954 struct i40e_pf *pf = vsi->back;
1955 u32 new_rx_count, new_tx_count;
1956 u16 tx_alloc_queue_pairs;
1957 int timeout = 50;
1958 int i, err = 0;
1959
1960 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
1961 return -EINVAL;
1962
1963 if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
1964 ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
1965 ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
1966 ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
1967 netdev_info(netdev,
1968 "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
1969 ring->tx_pending, ring->rx_pending,
1970 I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
1971 return -EINVAL;
1972 }
1973
1974 new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
1975 new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
1976
1977 /* if nothing to do return success */
1978 if ((new_tx_count == vsi->tx_rings[0]->count) &&
1979 (new_rx_count == vsi->rx_rings[0]->count))
1980 return 0;
1981
1982 /* If there is an AF_XDP page pool attached to any of the Rx rings,
1983 * disallow changing the number of descriptors -- regardless of
1984 * whether the netdev is running or not.
1985 */
1986 if (i40e_xsk_any_rx_ring_enabled(vsi))
1987 return -EBUSY;
1988
1989 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
1990 timeout--;
1991 if (!timeout)
1992 return -EBUSY;
1993 usleep_range(1000, 2000);
1994 }
1995
1996 if (!netif_running(vsi->netdev)) {
1997 /* simple case - set for the next time the netdev is started */
1998 for (i = 0; i < vsi->num_queue_pairs; i++) {
1999 vsi->tx_rings[i]->count = new_tx_count;
2000 vsi->rx_rings[i]->count = new_rx_count;
2001 if (i40e_enabled_xdp_vsi(vsi))
2002 vsi->xdp_rings[i]->count = new_tx_count;
2003 }
2004 vsi->num_tx_desc = new_tx_count;
2005 vsi->num_rx_desc = new_rx_count;
2006 goto done;
2007 }
2008
2009 /* We can't just free everything and then setup again,
2010 * because the ISRs in MSI-X mode get passed pointers
2011 * to the Tx and Rx ring structs.
2012 */
2013
2014 /* alloc updated Tx and XDP Tx resources */
2015 tx_alloc_queue_pairs = vsi->alloc_queue_pairs *
2016 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
2017 if (new_tx_count != vsi->tx_rings[0]->count) {
2018 netdev_info(netdev,
2019 "Changing Tx descriptor count from %d to %d.\n",
2020 vsi->tx_rings[0]->count, new_tx_count);
2021 tx_rings = kcalloc(tx_alloc_queue_pairs,
2022 sizeof(struct i40e_ring), GFP_KERNEL);
2023 if (!tx_rings) {
2024 err = -ENOMEM;
2025 goto done;
2026 }
2027
2028 for (i = 0; i < tx_alloc_queue_pairs; i++) {
2029 if (!i40e_active_tx_ring_index(vsi, i))
2030 continue;
2031
2032 tx_rings[i] = *vsi->tx_rings[i];
2033 tx_rings[i].count = new_tx_count;
2034 /* the desc and bi pointers will be reallocated in the
2035 * setup call
2036 */
2037 tx_rings[i].desc = NULL;
2038 tx_rings[i].rx_bi = NULL;
2039 err = i40e_setup_tx_descriptors(&tx_rings[i]);
2040 if (err) {
2041 while (i) {
2042 i--;
2043 if (!i40e_active_tx_ring_index(vsi, i))
2044 continue;
2045 i40e_free_tx_resources(&tx_rings[i]);
2046 }
2047 kfree(tx_rings);
2048 tx_rings = NULL;
2049
2050 goto done;
2051 }
2052 }
2053 }
2054
2055 /* alloc updated Rx resources */
2056 if (new_rx_count != vsi->rx_rings[0]->count) {
2057 netdev_info(netdev,
2058 "Changing Rx descriptor count from %d to %d\n",
2059 vsi->rx_rings[0]->count, new_rx_count);
2060 rx_rings = kcalloc(vsi->alloc_queue_pairs,
2061 sizeof(struct i40e_ring), GFP_KERNEL);
2062 if (!rx_rings) {
2063 err = -ENOMEM;
2064 goto free_tx;
2065 }
2066
2067 for (i = 0; i < vsi->num_queue_pairs; i++) {
2068 u16 unused;
2069
2070 /* clone ring and setup updated count */
2071 rx_rings[i] = *vsi->rx_rings[i];
2072 rx_rings[i].count = new_rx_count;
2073 /* the desc and bi pointers will be reallocated in the
2074 * setup call
2075 */
2076 rx_rings[i].desc = NULL;
2077 rx_rings[i].rx_bi = NULL;
2078 /* Clear cloned XDP RX-queue info before setup call */
2079 memset(&rx_rings[i].xdp_rxq, 0, sizeof(rx_rings[i].xdp_rxq));
2080 /* this is to allow wr32 to have something to write to
2081 * during early allocation of Rx buffers
2082 */
2083 rx_rings[i].tail = hw->hw_addr + I40E_PRTGEN_STATUS;
2084 err = i40e_setup_rx_descriptors(&rx_rings[i]);
2085 if (err)
2086 goto rx_unwind;
2087 err = i40e_alloc_rx_bi(&rx_rings[i]);
2088 if (err)
2089 goto rx_unwind;
2090
2091 /* now allocate the Rx buffers to make sure the OS
2092 * has enough memory, any failure here means abort
2093 */
2094 unused = I40E_DESC_UNUSED(&rx_rings[i]);
2095 err = i40e_alloc_rx_buffers(&rx_rings[i], unused);
2096 rx_unwind:
2097 if (err) {
2098 do {
2099 i40e_free_rx_resources(&rx_rings[i]);
2100 } while (i--);
2101 kfree(rx_rings);
2102 rx_rings = NULL;
2103
2104 goto free_tx;
2105 }
2106 }
2107 }
2108
2109 /* Bring interface down, copy in the new ring info,
2110 * then restore the interface
2111 */
2112 i40e_down(vsi);
2113
2114 if (tx_rings) {
2115 for (i = 0; i < tx_alloc_queue_pairs; i++) {
2116 if (i40e_active_tx_ring_index(vsi, i)) {
2117 i40e_free_tx_resources(vsi->tx_rings[i]);
2118 *vsi->tx_rings[i] = tx_rings[i];
2119 }
2120 }
2121 kfree(tx_rings);
2122 tx_rings = NULL;
2123 }
2124
2125 if (rx_rings) {
2126 for (i = 0; i < vsi->num_queue_pairs; i++) {
2127 i40e_free_rx_resources(vsi->rx_rings[i]);
2128 /* get the real tail offset */
2129 rx_rings[i].tail = vsi->rx_rings[i]->tail;
2130 /* this is to fake out the allocation routine
2131 * into thinking it has to realloc everything
2132 * but the recycling logic will let us re-use
2133 * the buffers allocated above
2134 */
2135 rx_rings[i].next_to_use = 0;
2136 rx_rings[i].next_to_clean = 0;
2137 rx_rings[i].next_to_alloc = 0;
2138 /* do a struct copy */
2139 *vsi->rx_rings[i] = rx_rings[i];
2140 }
2141 kfree(rx_rings);
2142 rx_rings = NULL;
2143 }
2144
2145 vsi->num_tx_desc = new_tx_count;
2146 vsi->num_rx_desc = new_rx_count;
2147 i40e_up(vsi);
2148
2149 free_tx:
2150 /* error cleanup if the Rx allocations failed after getting Tx */
2151 if (tx_rings) {
2152 for (i = 0; i < tx_alloc_queue_pairs; i++) {
2153 if (i40e_active_tx_ring_index(vsi, i))
2154 i40e_free_tx_resources(vsi->tx_rings[i]);
2155 }
2156 kfree(tx_rings);
2157 tx_rings = NULL;
2158 }
2159
2160 done:
2161 clear_bit(__I40E_CONFIG_BUSY, pf->state);
2162
2163 return err;
2164 }
2165
2166 /**
2167 * i40e_get_stats_count - return the stats count for a device
2168 * @netdev: the netdev to return the count for
2169 *
2170 * Returns the total number of statistics for this netdev. Note that even
2171 * though this is a function, the count for a specific netdev must never
2172 * change. Basing the count on static values such as the maximum number of
2173 * queues or the device type is OK. However, the API for obtaining stats is
2174 * *not* safe against changes based on non-static values such as the
2175 * *current* number of queues, or runtime flags.
2176 *
2177 * If a statistic is not always enabled, return it as part of the count
2178 * anyway, always return its string, and report its value as zero.
2179 **/
2180 static int i40e_get_stats_count(struct net_device *netdev)
2181 {
2182 struct i40e_netdev_priv *np = netdev_priv(netdev);
2183 struct i40e_vsi *vsi = np->vsi;
2184 struct i40e_pf *pf = vsi->back;
2185 int stats_len;
2186
2187 if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1)
2188 stats_len = I40E_PF_STATS_LEN;
2189 else
2190 stats_len = I40E_VSI_STATS_LEN;
2191
2192 /* The number of stats reported for a given net_device must remain
2193 * constant throughout the life of that device.
2194 *
2195 * This is because the API for obtaining the size, strings, and stats
2196 * is spread out over three separate ethtool ioctls. There is no safe
2197 * way to lock the number of stats across these calls, so we must
2198 * assume that they will never change.
2199 *
2200 * Due to this, we report the maximum number of queues, even if not
2201 * every queue is currently configured. Since we always allocate
2202 * queues in pairs, we'll just use netdev->num_tx_queues * 2. This
2203 * works because the num_tx_queues is set at device creation and never
2204 * changes.
2205 */
2206 stats_len += I40E_QUEUE_STATS_LEN * 2 * netdev->num_tx_queues;
2207
2208 return stats_len;
2209 }
2210
2211 static int i40e_get_sset_count(struct net_device *netdev, int sset)
2212 {
2213 struct i40e_netdev_priv *np = netdev_priv(netdev);
2214 struct i40e_vsi *vsi = np->vsi;
2215 struct i40e_pf *pf = vsi->back;
2216
2217 switch (sset) {
2218 case ETH_SS_TEST:
2219 return I40E_TEST_LEN;
2220 case ETH_SS_STATS:
2221 return i40e_get_stats_count(netdev);
2222 case ETH_SS_PRIV_FLAGS:
2223 return I40E_PRIV_FLAGS_STR_LEN +
2224 (pf->hw.pf_id == 0 ? I40E_GL_PRIV_FLAGS_STR_LEN : 0);
2225 default:
2226 return -EOPNOTSUPP;
2227 }
2228 }
2229
2230 /**
2231 * i40e_get_veb_tc_stats - copy VEB TC statistics to formatted structure
2232 * @tc: the TC statistics in VEB structure (veb->tc_stats)
2233 * @i: the index of traffic class in (veb->tc_stats) structure to copy
2234 *
2235 * Copy VEB TC statistics from structure of arrays (veb->tc_stats) to
2236 * one dimensional structure i40e_cp_veb_tc_stats.
2237 * Produce formatted i40e_cp_veb_tc_stats structure of the VEB TC
2238 * statistics for the given TC.
2239 **/
2240 static struct i40e_cp_veb_tc_stats
2241 i40e_get_veb_tc_stats(struct i40e_veb_tc_stats *tc, unsigned int i)
2242 {
2243 struct i40e_cp_veb_tc_stats veb_tc = {
2244 .tc_rx_packets = tc->tc_rx_packets[i],
2245 .tc_rx_bytes = tc->tc_rx_bytes[i],
2246 .tc_tx_packets = tc->tc_tx_packets[i],
2247 .tc_tx_bytes = tc->tc_tx_bytes[i],
2248 };
2249
2250 return veb_tc;
2251 }
2252
2253 /**
2254 * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
2255 * @pf: the PF device structure
2256 * @i: the priority value to copy
2257 *
2258 * The PFC stats are found as arrays in pf->stats, which is not easy to pass
2259 * into i40e_add_ethtool_stats. Produce a formatted i40e_pfc_stats structure
2260 * of the PFC stats for the given priority.
2261 **/
2262 static inline struct i40e_pfc_stats
2263 i40e_get_pfc_stats(struct i40e_pf *pf, unsigned int i)
2264 {
2265 #define I40E_GET_PFC_STAT(stat, priority) \
2266 .stat = pf->stats.stat[priority]
2267
2268 struct i40e_pfc_stats pfc = {
2269 I40E_GET_PFC_STAT(priority_xon_rx, i),
2270 I40E_GET_PFC_STAT(priority_xoff_rx, i),
2271 I40E_GET_PFC_STAT(priority_xon_tx, i),
2272 I40E_GET_PFC_STAT(priority_xoff_tx, i),
2273 I40E_GET_PFC_STAT(priority_xon_2_xoff, i),
2274 };
2275 return pfc;
2276 }
2277
2278 /**
2279 * i40e_get_ethtool_stats - copy stat values into supplied buffer
2280 * @netdev: the netdev to collect stats for
2281 * @stats: ethtool stats command structure
2282 * @data: ethtool supplied buffer
2283 *
2284 * Copy the stats values for this netdev into the buffer. Expects data to be
2285 * pre-allocated to the size returned by i40e_get_stats_count. Note that all
2286 * statistics must be copied in a static order, and the count must not change
2287 * for a given netdev. See i40e_get_stats_count for more details.
2288 *
2289 * If a statistic is not currently valid (such as a disabled queue), this
2290 * function reports its value as zero.
2291 **/
2292 static void i40e_get_ethtool_stats(struct net_device *netdev,
2293 struct ethtool_stats *stats, u64 *data)
2294 {
2295 struct i40e_netdev_priv *np = netdev_priv(netdev);
2296 struct i40e_vsi *vsi = np->vsi;
2297 struct i40e_pf *pf = vsi->back;
2298 struct i40e_veb *veb = NULL;
2299 unsigned int i;
2300 bool veb_stats;
2301 u64 *p = data;
2302
2303 i40e_update_stats(vsi);
2304
2305 i40e_add_ethtool_stats(&data, i40e_get_vsi_stats_struct(vsi),
2306 i40e_gstrings_net_stats);
2307
2308 i40e_add_ethtool_stats(&data, vsi, i40e_gstrings_misc_stats);
2309
2310 rcu_read_lock();
2311 for (i = 0; i < netdev->num_tx_queues; i++) {
2312 i40e_add_queue_stats(&data, READ_ONCE(vsi->tx_rings[i]));
2313 i40e_add_queue_stats(&data, READ_ONCE(vsi->rx_rings[i]));
2314 }
2315 rcu_read_unlock();
2316
2317 if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
2318 goto check_data_pointer;
2319
2320 veb_stats = ((pf->lan_veb != I40E_NO_VEB) &&
2321 (pf->lan_veb < I40E_MAX_VEB) &&
2322 (pf->flags & I40E_FLAG_VEB_STATS_ENABLED));
2323
2324 if (veb_stats) {
2325 veb = pf->veb[pf->lan_veb];
2326 i40e_update_veb_stats(veb);
2327 }
2328
2329 /* If veb stats aren't enabled, pass NULL instead of the veb so that
2330 * we initialize stats to zero and update the data pointer
2331 * intelligently
2332 */
2333 i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
2334 i40e_gstrings_veb_stats);
2335
2336 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2337 if (veb_stats) {
2338 struct i40e_cp_veb_tc_stats veb_tc =
2339 i40e_get_veb_tc_stats(&veb->tc_stats, i);
2340
2341 i40e_add_ethtool_stats(&data, &veb_tc,
2342 i40e_gstrings_veb_tc_stats);
2343 } else {
2344 i40e_add_ethtool_stats(&data, NULL,
2345 i40e_gstrings_veb_tc_stats);
2346 }
2347
2348 i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats);
2349
2350 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
2351 struct i40e_pfc_stats pfc = i40e_get_pfc_stats(pf, i);
2352
2353 i40e_add_ethtool_stats(&data, &pfc, i40e_gstrings_pfc_stats);
2354 }
2355
2356 check_data_pointer:
2357 WARN_ONCE(data - p != i40e_get_stats_count(netdev),
2358 "ethtool stats count mismatch!");
2359 }
2360
2361 /**
2362 * i40e_get_stat_strings - copy stat strings into supplied buffer
2363 * @netdev: the netdev to collect strings for
2364 * @data: supplied buffer to copy strings into
2365 *
2366 * Copy the strings related to stats for this netdev. Expects data to be
2367 * pre-allocated with the size reported by i40e_get_stats_count. Note that the
2368 * strings must be copied in a static order and the total count must not
2369 * change for a given netdev. See i40e_get_stats_count for more details.
2370 **/
2371 static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
2372 {
2373 struct i40e_netdev_priv *np = netdev_priv(netdev);
2374 struct i40e_vsi *vsi = np->vsi;
2375 struct i40e_pf *pf = vsi->back;
2376 unsigned int i;
2377 u8 *p = data;
2378
2379 i40e_add_stat_strings(&data, i40e_gstrings_net_stats);
2380
2381 i40e_add_stat_strings(&data, i40e_gstrings_misc_stats);
2382
2383 for (i = 0; i < netdev->num_tx_queues; i++) {
2384 i40e_add_stat_strings(&data, i40e_gstrings_queue_stats,
2385 "tx", i);
2386 i40e_add_stat_strings(&data, i40e_gstrings_queue_stats,
2387 "rx", i);
2388 }
2389
2390 if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
2391 goto check_data_pointer;
2392
2393 i40e_add_stat_strings(&data, i40e_gstrings_veb_stats);
2394
2395 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2396 i40e_add_stat_strings(&data, i40e_gstrings_veb_tc_stats, i);
2397
2398 i40e_add_stat_strings(&data, i40e_gstrings_stats);
2399
2400 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
2401 i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
2402
2403 check_data_pointer:
2404 WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
2405 "stat strings count mismatch!");
2406 }
2407
2408 static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data)
2409 {
2410 struct i40e_netdev_priv *np = netdev_priv(netdev);
2411 struct i40e_vsi *vsi = np->vsi;
2412 struct i40e_pf *pf = vsi->back;
2413 unsigned int i;
2414 u8 *p = data;
2415
2416 for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++)
2417 ethtool_sprintf(&p, i40e_gstrings_priv_flags[i].flag_string);
2418 if (pf->hw.pf_id != 0)
2419 return;
2420 for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++)
2421 ethtool_sprintf(&p, i40e_gl_gstrings_priv_flags[i].flag_string);
2422 }
2423
2424 static void i40e_get_strings(struct net_device *netdev, u32 stringset,
2425 u8 *data)
2426 {
2427 switch (stringset) {
2428 case ETH_SS_TEST:
2429 memcpy(data, i40e_gstrings_test,
2430 I40E_TEST_LEN * ETH_GSTRING_LEN);
2431 break;
2432 case ETH_SS_STATS:
2433 i40e_get_stat_strings(netdev, data);
2434 break;
2435 case ETH_SS_PRIV_FLAGS:
2436 i40e_get_priv_flag_strings(netdev, data);
2437 break;
2438 default:
2439 break;
2440 }
2441 }
2442
2443 static int i40e_get_ts_info(struct net_device *dev,
2444 struct ethtool_ts_info *info)
2445 {
2446 struct i40e_pf *pf = i40e_netdev_to_pf(dev);
2447
2448 /* only report HW timestamping if PTP is enabled */
2449 if (!(pf->flags & I40E_FLAG_PTP))
2450 return ethtool_op_get_ts_info(dev, info);
2451
2452 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
2453 SOF_TIMESTAMPING_RX_SOFTWARE |
2454 SOF_TIMESTAMPING_SOFTWARE |
2455 SOF_TIMESTAMPING_TX_HARDWARE |
2456 SOF_TIMESTAMPING_RX_HARDWARE |
2457 SOF_TIMESTAMPING_RAW_HARDWARE;
2458
2459 if (pf->ptp_clock)
2460 info->phc_index = ptp_clock_index(pf->ptp_clock);
2461 else
2462 info->phc_index = -1;
2463
2464 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
2465
2466 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
2467 BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
2468 BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
2469 BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ);
2470
2471 if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE)
2472 info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
2473 BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
2474 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
2475 BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
2476 BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
2477 BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
2478 BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
2479 BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
2480
2481 return 0;
2482 }
2483
2484 static u64 i40e_link_test(struct net_device *netdev, u64 *data)
2485 {
2486 struct i40e_netdev_priv *np = netdev_priv(netdev);
2487 struct i40e_pf *pf = np->vsi->back;
2488 i40e_status status;
2489 bool link_up = false;
2490
2491 netif_info(pf, hw, netdev, "link test\n");
2492 status = i40e_get_link_status(&pf->hw, &link_up);
2493 if (status) {
2494 netif_err(pf, drv, netdev, "link query timed out, please retry test\n");
2495 *data = 1;
2496 return *data;
2497 }
2498
2499 if (link_up)
2500 *data = 0;
2501 else
2502 *data = 1;
2503
2504 return *data;
2505 }
2506
2507 static u64 i40e_reg_test(struct net_device *netdev, u64 *data)
2508 {
2509 struct i40e_netdev_priv *np = netdev_priv(netdev);
2510 struct i40e_pf *pf = np->vsi->back;
2511
2512 netif_info(pf, hw, netdev, "register test\n");
2513 *data = i40e_diag_reg_test(&pf->hw);
2514
2515 return *data;
2516 }
2517
2518 static u64 i40e_eeprom_test(struct net_device *netdev, u64 *data)
2519 {
2520 struct i40e_netdev_priv *np = netdev_priv(netdev);
2521 struct i40e_pf *pf = np->vsi->back;
2522
2523 netif_info(pf, hw, netdev, "eeprom test\n");
2524 *data = i40e_diag_eeprom_test(&pf->hw);
2525
2526 /* forcibly clear the NVM Update state machine */
2527 pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT;
2528
2529 return *data;
2530 }
2531
2532 static u64 i40e_intr_test(struct net_device *netdev, u64 *data)
2533 {
2534 struct i40e_netdev_priv *np = netdev_priv(netdev);
2535 struct i40e_pf *pf = np->vsi->back;
2536 u16 swc_old = pf->sw_int_count;
2537
2538 netif_info(pf, hw, netdev, "interrupt test\n");
2539 wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
2540 (I40E_PFINT_DYN_CTL0_INTENA_MASK |
2541 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
2542 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
2543 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
2544 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
2545 usleep_range(1000, 2000);
2546 *data = (swc_old == pf->sw_int_count);
2547
2548 return *data;
2549 }
2550
2551 static inline bool i40e_active_vfs(struct i40e_pf *pf)
2552 {
2553 struct i40e_vf *vfs = pf->vf;
2554 int i;
2555
2556 for (i = 0; i < pf->num_alloc_vfs; i++)
2557 if (test_bit(I40E_VF_STATE_ACTIVE, &vfs[i].vf_states))
2558 return true;
2559 return false;
2560 }
2561
2562 static inline bool i40e_active_vmdqs(struct i40e_pf *pf)
2563 {
2564 return !!i40e_find_vsi_by_type(pf, I40E_VSI_VMDQ2);
2565 }
2566
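/* ethtool self-test entry point: an offline test (`ethtool -t <dev> offline`)
 * runs the link, EEPROM, interrupt and register tests below, while an online
 * test only checks link status.
 */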
2567 static void i40e_diag_test(struct net_device *netdev,
2568 struct ethtool_test *eth_test, u64 *data)
2569 {
2570 struct i40e_netdev_priv *np = netdev_priv(netdev);
2571 bool if_running = netif_running(netdev);
2572 struct i40e_pf *pf = np->vsi->back;
2573
2574 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
2575 /* Offline tests */
2576 netif_info(pf, drv, netdev, "offline testing starting\n");
2577
2578 set_bit(__I40E_TESTING, pf->state);
2579
2580 if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
2581 dev_warn(&pf->pdev->dev,
2582 "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
2583 data[I40E_ETH_TEST_REG] = 1;
2584 data[I40E_ETH_TEST_EEPROM] = 1;
2585 data[I40E_ETH_TEST_INTR] = 1;
2586 data[I40E_ETH_TEST_LINK] = 1;
2587 eth_test->flags |= ETH_TEST_FL_FAILED;
2588 clear_bit(__I40E_TESTING, pf->state);
2589 goto skip_ol_tests;
2590 }
2591
2592 /* If the device is online then take it offline */
2593 if (if_running)
2594 /* indicate we're in test mode */
2595 i40e_close(netdev);
2596 else
2597 /* This reset does not affect link - if it is
2598 * changed to a type of reset that does affect
2599 * link then the following link test would have
2600 * to be moved to before the reset
2601 */
2602 i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
2603
2604 if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
2605 eth_test->flags |= ETH_TEST_FL_FAILED;
2606
2607 if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
2608 eth_test->flags |= ETH_TEST_FL_FAILED;
2609
2610 if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
2611 eth_test->flags |= ETH_TEST_FL_FAILED;
2612
2613 /* run reg test last, a reset is required after it */
2614 if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
2615 eth_test->flags |= ETH_TEST_FL_FAILED;
2616
2617 clear_bit(__I40E_TESTING, pf->state);
2618 i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
2619
2620 if (if_running)
2621 i40e_open(netdev);
2622 } else {
2623 /* Online tests */
2624 netif_info(pf, drv, netdev, "online testing starting\n");
2625
2626 if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
2627 eth_test->flags |= ETH_TEST_FL_FAILED;
2628
2629 /* Offline-only tests are not run online; report them as passing by default */
2630 data[I40E_ETH_TEST_REG] = 0;
2631 data[I40E_ETH_TEST_EEPROM] = 0;
2632 data[I40E_ETH_TEST_INTR] = 0;
2633 }
2634
2635 skip_ol_tests:
2636
2637 netif_info(pf, drv, netdev, "testing finished\n");
2638 }
2639
2640 static void i40e_get_wol(struct net_device *netdev,
2641 struct ethtool_wolinfo *wol)
2642 {
2643 struct i40e_netdev_priv *np = netdev_priv(netdev);
2644 struct i40e_pf *pf = np->vsi->back;
2645 struct i40e_hw *hw = &pf->hw;
2646 u16 wol_nvm_bits;
2647
2648 /* NVM bit on means WoL disabled for the port */
2649 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
2650 if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) {
2651 wol->supported = 0;
2652 wol->wolopts = 0;
2653 } else {
2654 wol->supported = WAKE_MAGIC;
2655 wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
2656 }
2657 }
2658
2659 /**
2660 * i40e_set_wol - set the WakeOnLAN configuration
2661 * @netdev: the netdev in question
2662 * @wol: the ethtool WoL setting data
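 *
 * Only magic-packet wake is supported; e.g. `ethtool -s <dev> wol g` enables
 * it and `wol d` disables it.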
2663 **/
2664 static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2665 {
2666 struct i40e_netdev_priv *np = netdev_priv(netdev);
2667 struct i40e_pf *pf = np->vsi->back;
2668 struct i40e_vsi *vsi = np->vsi;
2669 struct i40e_hw *hw = &pf->hw;
2670 u16 wol_nvm_bits;
2671
2672 /* WoL not supported if this isn't the controlling PF on the port */
2673 if (hw->partition_id != 1) {
2674 i40e_partition_setting_complaint(pf);
2675 return -EOPNOTSUPP;
2676 }
2677
2678 if (vsi != pf->vsi[pf->lan_vsi])
2679 return -EOPNOTSUPP;
2680
2681 /* NVM bit on means WoL disabled for the port */
2682 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
2683 if (BIT(hw->port) & wol_nvm_bits)
2684 return -EOPNOTSUPP;
2685
2686 /* only magic packet is supported */
2687 if (wol->wolopts & ~WAKE_MAGIC)
2688 return -EOPNOTSUPP;
2689
2690 /* is this a new value? */
2691 if (pf->wol_en != !!wol->wolopts) {
2692 pf->wol_en = !!wol->wolopts;
2693 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
2694 }
2695
2696 return 0;
2697 }
2698
2699 static int i40e_set_phys_id(struct net_device *netdev,
2700 enum ethtool_phys_id_state state)
2701 {
2702 struct i40e_netdev_priv *np = netdev_priv(netdev);
2703 i40e_status ret = 0;
2704 struct i40e_pf *pf = np->vsi->back;
2705 struct i40e_hw *hw = &pf->hw;
2706 int blink_freq = 2;
2707 u16 temp_status;
2708
2709 switch (state) {
2710 case ETHTOOL_ID_ACTIVE:
2711 if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) {
2712 pf->led_status = i40e_led_get(hw);
2713 } else {
2714 if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
2715 i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL,
2716 NULL);
2717 ret = i40e_led_get_phy(hw, &temp_status,
2718 &pf->phy_led_val);
2719 pf->led_status = temp_status;
2720 }
2721 return blink_freq;
2722 case ETHTOOL_ID_ON:
2723 if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS))
2724 i40e_led_set(hw, 0xf, false);
2725 else
2726 ret = i40e_led_set_phy(hw, true, pf->led_status, 0);
2727 break;
2728 case ETHTOOL_ID_OFF:
2729 if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS))
2730 i40e_led_set(hw, 0x0, false);
2731 else
2732 ret = i40e_led_set_phy(hw, false, pf->led_status, 0);
2733 break;
2734 case ETHTOOL_ID_INACTIVE:
2735 if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) {
2736 i40e_led_set(hw, pf->led_status, false);
2737 } else {
2738 ret = i40e_led_set_phy(hw, false, pf->led_status,
2739 (pf->phy_led_val |
2740 I40E_PHY_LED_MODE_ORIG));
2741 if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
2742 i40e_aq_set_phy_debug(hw, 0, NULL);
2743 }
2744 break;
2745 default:
2746 break;
2747 }
2748 if (ret)
2749 return -ENOENT;
2750 else
2751 return 0;
2752 }
2753
2754 /* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt
2755 * Throttle Rate (ITR), i.e. ITR(1) = 2 us, ITR(10) = 20 us, and also
2756 * 125 us (8000 interrupts per second) == ITR(62)
2757 */
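/* For illustration, with the factor-of-2 conversion above a requested
 * rx-usecs of 50 corresponds to an ITR register value of 25.
 */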
2758
2759 /**
2760 * __i40e_get_coalesce - get per-queue coalesce settings
2761 * @netdev: the netdev to check
2762 * @ec: ethtool coalesce data structure
2763 * @queue: which queue to pick
2764 *
2765 * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
2766 * are per queue. If queue is <0 then we default to queue 0 as the
2767 * representative value.
2768 **/
2769 static int __i40e_get_coalesce(struct net_device *netdev,
2770 struct ethtool_coalesce *ec,
2771 int queue)
2772 {
2773 struct i40e_netdev_priv *np = netdev_priv(netdev);
2774 struct i40e_ring *rx_ring, *tx_ring;
2775 struct i40e_vsi *vsi = np->vsi;
2776
2777 ec->tx_max_coalesced_frames_irq = vsi->work_limit;
2778 ec->rx_max_coalesced_frames_irq = vsi->work_limit;
2779
2780 /* rx and tx usecs have per-queue values. If the user doesn't specify a
2781 * queue, return queue 0's value as the representative.
2782 */
2783 if (queue < 0)
2784 queue = 0;
2785 else if (queue >= vsi->num_queue_pairs)
2786 return -EINVAL;
2787
2788 rx_ring = vsi->rx_rings[queue];
2789 tx_ring = vsi->tx_rings[queue];
2790
2791 if (ITR_IS_DYNAMIC(rx_ring->itr_setting))
2792 ec->use_adaptive_rx_coalesce = 1;
2793
2794 if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
2795 ec->use_adaptive_tx_coalesce = 1;
2796
2797 ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
2798 ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
2799
2800 /* we use the _usecs_high fields to store/set the interrupt rate limit
2801 * that the hardware supports; this almost but not quite
2802 * fits the original intent of the ethtool variable, as
2803 * rx_coalesce_usecs_high limits the total interrupts
2804 * per second from both tx/rx sources.
2805 */
2806 ec->rx_coalesce_usecs_high = vsi->int_rate_limit;
2807 ec->tx_coalesce_usecs_high = vsi->int_rate_limit;
2808
2809 return 0;
2810 }
2811
2812 /**
2813 * i40e_get_coalesce - get a netdev's coalesce settings
2814 * @netdev: the netdev to check
2815 * @ec: ethtool coalesce data structure
2816 *
2817 * Gets the coalesce settings for a particular netdev. Note that if the
2818 * user has modified per-queue settings, this is only guaranteed to reflect queue 0. See
2819 * __i40e_get_coalesce for more details.
2820 **/
2821 static int i40e_get_coalesce(struct net_device *netdev,
2822 struct ethtool_coalesce *ec)
2823 {
2824 return __i40e_get_coalesce(netdev, ec, -1);
2825 }
2826
2827 /**
2828 * i40e_get_per_queue_coalesce - gets coalesce settings for particular queue
2829 * @netdev: netdev structure
2830 * @ec: ethtool's coalesce settings
2831 * @queue: the particular queue to read
2832 *
2833 * Will read a specific queue's coalesce settings
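 * (e.g. queried via `ethtool --per-queue <dev> queue_mask 0x1 --show-coalesce`)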
2834 **/
2835 static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
2836 struct ethtool_coalesce *ec)
2837 {
2838 return __i40e_get_coalesce(netdev, ec, queue);
2839 }
2840
2841 /**
2842 * i40e_set_itr_per_queue - set ITR values for specific queue
2843 * @vsi: the VSI to set values for
2844 * @ec: coalesce settings from ethtool
2845 * @queue: the queue to modify
2846 *
2847 * Change the ITR settings for a specific queue.
2848 **/
2849 static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
2850 struct ethtool_coalesce *ec,
2851 int queue)
2852 {
2853 struct i40e_ring *rx_ring = vsi->rx_rings[queue];
2854 struct i40e_ring *tx_ring = vsi->tx_rings[queue];
2855 struct i40e_pf *pf = vsi->back;
2856 struct i40e_hw *hw = &pf->hw;
2857 struct i40e_q_vector *q_vector;
2858 u16 intrl;
2859
2860 intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit);
2861
2862 rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
2863 tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
2864
2865 if (ec->use_adaptive_rx_coalesce)
2866 rx_ring->itr_setting |= I40E_ITR_DYNAMIC;
2867 else
2868 rx_ring->itr_setting &= ~I40E_ITR_DYNAMIC;
2869
2870 if (ec->use_adaptive_tx_coalesce)
2871 tx_ring->itr_setting |= I40E_ITR_DYNAMIC;
2872 else
2873 tx_ring->itr_setting &= ~I40E_ITR_DYNAMIC;
2874
2875 q_vector = rx_ring->q_vector;
2876 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
2877
2878 q_vector = tx_ring->q_vector;
2879 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
2880
2881 /* The interrupt handler itself will take care of programming
2882 * the Tx and Rx ITR values based on the values we have entered
2883 * into the q_vector, no need to write the values now.
2884 */
2885
2886 wr32(hw, I40E_PFINT_RATEN(q_vector->reg_idx), intrl);
2887 i40e_flush(hw);
2888 }
2889
2890 /**
2891 * __i40e_set_coalesce - set coalesce settings for particular queue
2892 * @netdev: the netdev to change
2893 * @ec: ethtool coalesce settings
2894 * @queue: the queue to change
2895 *
2896 * Sets the coalesce settings for a particular queue.
2897 **/
2898 static int __i40e_set_coalesce(struct net_device *netdev,
2899 struct ethtool_coalesce *ec,
2900 int queue)
2901 {
2902 struct i40e_netdev_priv *np = netdev_priv(netdev);
2903 u16 intrl_reg, cur_rx_itr, cur_tx_itr;
2904 struct i40e_vsi *vsi = np->vsi;
2905 struct i40e_pf *pf = vsi->back;
2906 int i;
2907
2908 if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
2909 vsi->work_limit = ec->tx_max_coalesced_frames_irq;
2910
2911 if (queue < 0) {
2912 cur_rx_itr = vsi->rx_rings[0]->itr_setting;
2913 cur_tx_itr = vsi->tx_rings[0]->itr_setting;
2914 } else if (queue < vsi->num_queue_pairs) {
2915 cur_rx_itr = vsi->rx_rings[queue]->itr_setting;
2916 cur_tx_itr = vsi->tx_rings[queue]->itr_setting;
2917 } else {
2918 netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
2919 vsi->num_queue_pairs - 1);
2920 return -EINVAL;
2921 }
2922
2923 cur_tx_itr &= ~I40E_ITR_DYNAMIC;
2924 cur_rx_itr &= ~I40E_ITR_DYNAMIC;
2925
2926 /* tx_coalesce_usecs_high is ignored, use rx-usecs-high instead */
2927 if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) {
2928 netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n");
2929 return -EINVAL;
2930 }
2931
2932 if (ec->rx_coalesce_usecs_high > INTRL_REG_TO_USEC(I40E_MAX_INTRL)) {
2933 netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-%lu\n",
2934 INTRL_REG_TO_USEC(I40E_MAX_INTRL));
2935 return -EINVAL;
2936 }
2937
2938 if (ec->rx_coalesce_usecs != cur_rx_itr &&
2939 ec->use_adaptive_rx_coalesce) {
2940 netif_info(pf, drv, netdev, "RX interrupt moderation cannot be changed if adaptive-rx is enabled.\n");
2941 return -EINVAL;
2942 }
2943
2944 if (ec->rx_coalesce_usecs > I40E_MAX_ITR) {
2945 netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
2946 return -EINVAL;
2947 }
2948
2949 if (ec->tx_coalesce_usecs != cur_tx_itr &&
2950 ec->use_adaptive_tx_coalesce) {
2951 netif_info(pf, drv, netdev, "TX interrupt moderation cannot be changed if adaptive-tx is enabled.\n");
2952 return -EINVAL;
2953 }
2954
2955 if (ec->tx_coalesce_usecs > I40E_MAX_ITR) {
2956 netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
2957 return -EINVAL;
2958 }
2959
2960 if (ec->use_adaptive_rx_coalesce && !cur_rx_itr)
2961 ec->rx_coalesce_usecs = I40E_MIN_ITR;
2962
2963 if (ec->use_adaptive_tx_coalesce && !cur_tx_itr)
2964 ec->tx_coalesce_usecs = I40E_MIN_ITR;
2965
2966 intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high);
2967 vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg);
2968 if (vsi->int_rate_limit != ec->rx_coalesce_usecs_high) {
2969 netif_info(pf, drv, netdev, "Interrupt rate limit rounded down to %d\n",
2970 vsi->int_rate_limit);
2971 }
2972
2973 /* rx and tx usecs have per-queue values. If the user doesn't specify a
2974 * queue, apply to all queues.
2975 */
2976 if (queue < 0) {
2977 for (i = 0; i < vsi->num_queue_pairs; i++)
2978 i40e_set_itr_per_queue(vsi, ec, i);
2979 } else {
2980 i40e_set_itr_per_queue(vsi, ec, queue);
2981 }
2982
2983 return 0;
2984 }
2985
2986 /**
2987 * i40e_set_coalesce - set coalesce settings for every queue on the netdev
2988 * @netdev: the netdev to change
2989 * @ec: ethtool coalesce settings
2990 *
2991 * This will set each queue to the same coalesce settings.
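 * A device-wide request such as `ethtool -C <dev> rx-usecs 50` ends up here,
 * for example.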
2992 **/
2993 static int i40e_set_coalesce(struct net_device *netdev,
2994 struct ethtool_coalesce *ec)
2995 {
2996 return __i40e_set_coalesce(netdev, ec, -1);
2997 }
2998
2999 /**
3000 * i40e_set_per_queue_coalesce - set specific queue's coalesce settings
3001 * @netdev: the netdev to change
3002 * @ec: ethtool's coalesce settings
3003 * @queue: the queue to change
3004 *
3005 * Sets the specified queue's coalesce settings.
3006 **/
3007 static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
3008 struct ethtool_coalesce *ec)
3009 {
3010 return __i40e_set_coalesce(netdev, ec, queue);
3011 }
3012
3013 /**
3014 * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
3015 * @pf: pointer to the physical function struct
3016 * @cmd: ethtool rxnfc command
3017 *
3018 * Returns 0 if the flow is supported, else -EINVAL.
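 * Queried by e.g. `ethtool -n <dev> rx-flow-hash tcp4`.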
3019 **/
3020 static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
3021 {
3022 struct i40e_hw *hw = &pf->hw;
3023 u8 flow_pctype = 0;
3024 u64 i_set = 0;
3025
3026 cmd->data = 0;
3027
3028 switch (cmd->flow_type) {
3029 case TCP_V4_FLOW:
3030 flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
3031 break;
3032 case UDP_V4_FLOW:
3033 flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
3034 break;
3035 case TCP_V6_FLOW:
3036 flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
3037 break;
3038 case UDP_V6_FLOW:
3039 flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
3040 break;
3041 case SCTP_V4_FLOW:
3042 case AH_ESP_V4_FLOW:
3043 case AH_V4_FLOW:
3044 case ESP_V4_FLOW:
3045 case IPV4_FLOW:
3046 case SCTP_V6_FLOW:
3047 case AH_ESP_V6_FLOW:
3048 case AH_V6_FLOW:
3049 case ESP_V6_FLOW:
3050 case IPV6_FLOW:
3051 /* Default is src/dest for IP, no matter the L4 hashing */
3052 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
3053 break;
3054 default:
3055 return -EINVAL;
3056 }
3057
3058 /* Read flow based hash input set register */
3059 if (flow_pctype) {
3060 i_set = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
3061 flow_pctype)) |
3062 ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
3063 flow_pctype)) << 32);
3064 }
3065
3066 /* Process bits of hash input set */
3067 if (i_set) {
3068 if (i_set & I40E_L4_SRC_MASK)
3069 cmd->data |= RXH_L4_B_0_1;
3070 if (i_set & I40E_L4_DST_MASK)
3071 cmd->data |= RXH_L4_B_2_3;
3072
3073 if (cmd->flow_type == TCP_V4_FLOW ||
3074 cmd->flow_type == UDP_V4_FLOW) {
3075 if (i_set & I40E_L3_SRC_MASK)
3076 cmd->data |= RXH_IP_SRC;
3077 if (i_set & I40E_L3_DST_MASK)
3078 cmd->data |= RXH_IP_DST;
3079 } else if (cmd->flow_type == TCP_V6_FLOW ||
3080 cmd->flow_type == UDP_V6_FLOW) {
3081 if (i_set & I40E_L3_V6_SRC_MASK)
3082 cmd->data |= RXH_IP_SRC;
3083 if (i_set & I40E_L3_V6_DST_MASK)
3084 cmd->data |= RXH_IP_DST;
3085 }
3086 }
3087
3088 return 0;
3089 }
3090
3091 /**
3092 * i40e_check_mask - Check whether a mask field is set
3093 * @mask: the full mask value
3094 * @field: mask of the field to check
3095 *
3096 * If the given mask is fully set, return positive value. If the mask for the
3097 * field is fully unset, return zero. Otherwise return a negative error code.
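 * For example, i40e_check_mask(0xffff0000, 0xffff0000) returns 1, a mask of 0
 * returns 0, and a partial mask such as 0x00ff0000 returns -1.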
3098 **/
3099 static int i40e_check_mask(u64 mask, u64 field)
3100 {
3101 u64 value = mask & field;
3102
3103 if (value == field)
3104 return 1;
3105 else if (!value)
3106 return 0;
3107 else
3108 return -1;
3109 }
3110
3111 /**
3112 * i40e_parse_rx_flow_user_data - Deconstruct user-defined data
3113 * @fsp: pointer to rx flow specification
3114 * @data: pointer to userdef data structure for storage
3115 *
3116 * Read the user-defined data and deconstruct the value into a structure. No
3117 * other code should read the user-defined data, so as to ensure that every
3118 * place consistently reads the value correctly.
3119 *
3120 * The user-defined field is a 64bit Big Endian format value, which we
3121 * deconstruct by reading bits or bit fields from it. Single bit flags shall
3122 * be defined starting from the highest bits, while small bit field values
3123 * shall be defined starting from the lowest bits.
3124 *
3125 * Returns 0 if the data is valid, and non-zero if the userdef data is invalid
3126 * and the filter should be rejected. The data structure will always be
3127 * modified even if FLOW_EXT is not set.
3128 *
3129 **/
3130 static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
3131 struct i40e_rx_flow_userdef *data)
3132 {
3133 u64 value, mask;
3134 int valid;
3135
3136 /* Zero memory first so it's always consistent. */
3137 memset(data, 0, sizeof(*data));
3138
3139 if (!(fsp->flow_type & FLOW_EXT))
3140 return 0;
3141
3142 value = be64_to_cpu(*((__be64 *)fsp->h_ext.data));
3143 mask = be64_to_cpu(*((__be64 *)fsp->m_ext.data));
3144
3145 #define I40E_USERDEF_FLEX_WORD GENMASK_ULL(15, 0)
3146 #define I40E_USERDEF_FLEX_OFFSET GENMASK_ULL(31, 16)
3147 #define I40E_USERDEF_FLEX_FILTER GENMASK_ULL(31, 0)
3148
3149 valid = i40e_check_mask(mask, I40E_USERDEF_FLEX_FILTER);
3150 if (valid < 0) {
3151 return -EINVAL;
3152 } else if (valid) {
3153 data->flex_word = value & I40E_USERDEF_FLEX_WORD;
3154 data->flex_offset =
3155 (value & I40E_USERDEF_FLEX_OFFSET) >> 16;
3156 data->flex_filter = true;
3157 }
3158
3159 return 0;
3160 }
3161
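/* Illustration (hypothetical command, not upstream code): with the layout
 * above, a rule such as
 *
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 user-def 0x4FFFF action 2
 *
 * carries a user-def value of 0x000000000004FFFF, which this helper decodes
 * as flex_word = 0xFFFF to be matched at flex_offset = 4 bytes into the
 * packet payload.
 */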
3162 /**
3163 * i40e_fill_rx_flow_user_data - Fill in user-defined data field
3164 * @fsp: pointer to rx_flow specification
3165 * @data: pointer to return userdef data
3166 *
3167 * Reads the userdef data structure and properly fills in the user defined
3168 * fields of the rx_flow_spec.
3169 **/
3170 static void i40e_fill_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
3171 struct i40e_rx_flow_userdef *data)
3172 {
3173 u64 value = 0, mask = 0;
3174
3175 if (data->flex_filter) {
3176 value |= data->flex_word;
3177 value |= (u64)data->flex_offset << 16;
3178 mask |= I40E_USERDEF_FLEX_FILTER;
3179 }
3180
3181 if (value || mask)
3182 fsp->flow_type |= FLOW_EXT;
3183
3184 *((__be64 *)fsp->h_ext.data) = cpu_to_be64(value);
3185 *((__be64 *)fsp->m_ext.data) = cpu_to_be64(mask);
3186 }
3187
3188 /**
3189 * i40e_get_ethtool_fdir_all - Populates the rule count of a command
3190 * @pf: Pointer to the physical function struct
3191 * @cmd: The command to get or set Rx flow classification rules
3192 * @rule_locs: Array of used rule locations
3193 *
3194 * This function populates both the total and actual rule count of
3195 * the ethtool flow classification command
3196 *
3197 * Returns 0 on success or -EMSGSIZE if there are more rules than rule_locs can hold
3198 **/
3199 static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
3200 struct ethtool_rxnfc *cmd,
3201 u32 *rule_locs)
3202 {
3203 struct i40e_fdir_filter *rule;
3204 struct hlist_node *node2;
3205 int cnt = 0;
3206
3207 /* report total rule count */
3208 cmd->data = i40e_get_fd_cnt_all(pf);
3209
3210 hlist_for_each_entry_safe(rule, node2,
3211 &pf->fdir_filter_list, fdir_node) {
3212 if (cnt == cmd->rule_cnt)
3213 return -EMSGSIZE;
3214
3215 rule_locs[cnt] = rule->fd_id;
3216 cnt++;
3217 }
3218
3219 cmd->rule_cnt = cnt;
3220
3221 return 0;
3222 }
3223
3224 /**
3225 * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
3226 * @pf: Pointer to the physical function struct
3227 * @cmd: The command to get or set Rx flow classification rules
3228 *
3229 * This function looks up a filter based on the Rx flow classification
3230 * command and fills the flow spec info for it if found
3231 *
3232 * Returns 0 on success or -EINVAL if filter not found
3233 **/
3234 static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
3235 struct ethtool_rxnfc *cmd)
3236 {
3237 struct ethtool_rx_flow_spec *fsp =
3238 (struct ethtool_rx_flow_spec *)&cmd->fs;
3239 struct i40e_rx_flow_userdef userdef = {0};
3240 struct i40e_fdir_filter *rule = NULL;
3241 struct hlist_node *node2;
3242 u64 input_set;
3243 u16 index;
3244
3245 hlist_for_each_entry_safe(rule, node2,
3246 &pf->fdir_filter_list, fdir_node) {
3247 if (fsp->location <= rule->fd_id)
3248 break;
3249 }
3250
3251 if (!rule || fsp->location != rule->fd_id)
3252 return -EINVAL;
3253
3254 fsp->flow_type = rule->flow_type;
3255 if (fsp->flow_type == IP_USER_FLOW) {
3256 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
3257 fsp->h_u.usr_ip4_spec.proto = 0;
3258 fsp->m_u.usr_ip4_spec.proto = 0;
3259 }
3260
3261 if (fsp->flow_type == IPV6_USER_FLOW ||
3262 fsp->flow_type == UDP_V6_FLOW ||
3263 fsp->flow_type == TCP_V6_FLOW ||
3264 fsp->flow_type == SCTP_V6_FLOW) {
3265 /* Reverse the src and dest notion, since the HW views them
3266 * from Tx perspective whereas the user expects it from
3267 * Rx filter view.
3268 */
3269 fsp->h_u.tcp_ip6_spec.psrc = rule->dst_port;
3270 fsp->h_u.tcp_ip6_spec.pdst = rule->src_port;
3271 memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->src_ip6,
3272 sizeof(__be32) * 4);
3273 memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->dst_ip6,
3274 sizeof(__be32) * 4);
3275 } else {
3276 /* Reverse the src and dest notion, since the HW views them
3277 * from Tx perspective whereas the user expects it from
3278 * Rx filter view.
3279 */
3280 fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
3281 fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
3282 fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip;
3283 fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip;
3284 }
3285
3286 switch (rule->flow_type) {
3287 case SCTP_V4_FLOW:
3288 index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
3289 break;
3290 case TCP_V4_FLOW:
3291 index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
3292 break;
3293 case UDP_V4_FLOW:
3294 index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
3295 break;
3296 case SCTP_V6_FLOW:
3297 index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
3298 break;
3299 case TCP_V6_FLOW:
3300 index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
3301 break;
3302 case UDP_V6_FLOW:
3303 index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
3304 break;
3305 case IP_USER_FLOW:
3306 index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
3307 break;
3308 case IPV6_USER_FLOW:
3309 index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
3310 break;
3311 default:
3312 /* If we have stored a filter with a flow type not listed here
3313 * it is almost certainly a driver bug. WARN(), and then
3314 * assign the input_set as if all fields are enabled to avoid
3315 * reading unassigned memory.
3316 */
3317 WARN(1, "Missing input set index for flow_type %d\n",
3318 rule->flow_type);
3319 input_set = 0xFFFFFFFFFFFFFFFFULL;
3320 goto no_input_set;
3321 }
3322
3323 input_set = i40e_read_fd_input_set(pf, index);
3324
3325 no_input_set:
3326 if (input_set & I40E_L3_V6_SRC_MASK) {
3327 fsp->m_u.tcp_ip6_spec.ip6src[0] = htonl(0xFFFFFFFF);
3328 fsp->m_u.tcp_ip6_spec.ip6src[1] = htonl(0xFFFFFFFF);
3329 fsp->m_u.tcp_ip6_spec.ip6src[2] = htonl(0xFFFFFFFF);
3330 fsp->m_u.tcp_ip6_spec.ip6src[3] = htonl(0xFFFFFFFF);
3331 }
3332
3333 if (input_set & I40E_L3_V6_DST_MASK) {
3334 fsp->m_u.tcp_ip6_spec.ip6dst[0] = htonl(0xFFFFFFFF);
3335 fsp->m_u.tcp_ip6_spec.ip6dst[1] = htonl(0xFFFFFFFF);
3336 fsp->m_u.tcp_ip6_spec.ip6dst[2] = htonl(0xFFFFFFFF);
3337 fsp->m_u.tcp_ip6_spec.ip6dst[3] = htonl(0xFFFFFFFF);
3338 }
3339
3340 if (input_set & I40E_L3_SRC_MASK)
3341 fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF);
3342
3343 if (input_set & I40E_L3_DST_MASK)
3344 fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF);
3345
3346 if (input_set & I40E_L4_SRC_MASK)
3347 fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF);
3348
3349 if (input_set & I40E_L4_DST_MASK)
3350 fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF);
3351
3352 if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
3353 fsp->ring_cookie = RX_CLS_FLOW_DISC;
3354 else
3355 fsp->ring_cookie = rule->q_index;
3356
3357 if (rule->vlan_tag) {
3358 fsp->h_ext.vlan_etype = rule->vlan_etype;
3359 fsp->m_ext.vlan_etype = htons(0xFFFF);
3360 fsp->h_ext.vlan_tci = rule->vlan_tag;
3361 fsp->m_ext.vlan_tci = htons(0xFFFF);
3362 fsp->flow_type |= FLOW_EXT;
3363 }
3364
3365 if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) {
3366 struct i40e_vsi *vsi;
3367
3368 vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
3369 if (vsi && vsi->type == I40E_VSI_SRIOV) {
3370 /* VFs are zero-indexed by the driver, but ethtool
3371 * expects them to be one-indexed, so add one here
3372 */
3373 u64 ring_vf = vsi->vf_id + 1;
3374
3375 ring_vf <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
3376 fsp->ring_cookie |= ring_vf;
3377 }
3378 }
3379
3380 if (rule->flex_filter) {
3381 userdef.flex_filter = true;
3382 userdef.flex_word = be16_to_cpu(rule->flex_word);
3383 userdef.flex_offset = rule->flex_offset;
3384 }
3385
3386 i40e_fill_rx_flow_user_data(fsp, &userdef);
3387
3388 return 0;
3389 }
3390
3391 /**
3392 * i40e_get_rxnfc - command to get RX flow classification rules
3393 * @netdev: network interface device structure
3394 * @cmd: ethtool rxnfc command
3395 * @rule_locs: pointer to store rule data
3396 *
3397 * Returns Success if the command is supported.
3398 **/
3399 static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
3400 u32 *rule_locs)
3401 {
3402 struct i40e_netdev_priv *np = netdev_priv(netdev);
3403 struct i40e_vsi *vsi = np->vsi;
3404 struct i40e_pf *pf = vsi->back;
3405 int ret = -EOPNOTSUPP;
3406
3407 switch (cmd->cmd) {
3408 case ETHTOOL_GRXRINGS:
3409 cmd->data = vsi->rss_size;
3410 ret = 0;
3411 break;
3412 case ETHTOOL_GRXFH:
3413 ret = i40e_get_rss_hash_opts(pf, cmd);
3414 break;
3415 case ETHTOOL_GRXCLSRLCNT:
3416 cmd->rule_cnt = pf->fdir_pf_active_filters;
3417 /* report total rule count */
3418 cmd->data = i40e_get_fd_cnt_all(pf);
3419 ret = 0;
3420 break;
3421 case ETHTOOL_GRXCLSRULE:
3422 ret = i40e_get_ethtool_fdir_entry(pf, cmd);
3423 break;
3424 case ETHTOOL_GRXCLSRLALL:
3425 ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs);
3426 break;
3427 default:
3428 break;
3429 }
3430
3431 return ret;
3432 }
3433
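/* Usage sketch (assumed userspace behaviour, abridged): listing Flow
 * Director rules with "ethtool -u eth0" typically issues ETHTOOL_GRXCLSRLCNT
 * to learn the rule count, ETHTOOL_GRXCLSRLALL to fetch the rule locations,
 * and then ETHTOOL_GRXCLSRULE per location, all of which are dispatched by
 * i40e_get_rxnfc() above.
 */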
3434 /**
3435 * i40e_get_rss_hash_bits - Read RSS Hash bits from register
3436 * @nfc: pointer to user request
3437 * @i_setc: bits currently set
3438 *
3439 * Returns value of bits to be set per user request
3440 **/
3441 static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
3442 {
3443 u64 i_set = i_setc;
3444 u64 src_l3 = 0, dst_l3 = 0;
3445
3446 if (nfc->data & RXH_L4_B_0_1)
3447 i_set |= I40E_L4_SRC_MASK;
3448 else
3449 i_set &= ~I40E_L4_SRC_MASK;
3450 if (nfc->data & RXH_L4_B_2_3)
3451 i_set |= I40E_L4_DST_MASK;
3452 else
3453 i_set &= ~I40E_L4_DST_MASK;
3454
3455 if (nfc->flow_type == TCP_V6_FLOW || nfc->flow_type == UDP_V6_FLOW) {
3456 src_l3 = I40E_L3_V6_SRC_MASK;
3457 dst_l3 = I40E_L3_V6_DST_MASK;
3458 } else if (nfc->flow_type == TCP_V4_FLOW ||
3459 nfc->flow_type == UDP_V4_FLOW) {
3460 src_l3 = I40E_L3_SRC_MASK;
3461 dst_l3 = I40E_L3_DST_MASK;
3462 } else {
3463 /* Any other flow types are not supported here */
3464 return i_set;
3465 }
3466
3467 if (nfc->data & RXH_IP_SRC)
3468 i_set |= src_l3;
3469 else
3470 i_set &= ~src_l3;
3471 if (nfc->data & RXH_IP_DST)
3472 i_set |= dst_l3;
3473 else
3474 i_set &= ~dst_l3;
3475
3476 return i_set;
3477 }
3478
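/* Worked example (illustrative): for nfc->flow_type == UDP_V4_FLOW and
 * nfc->data == (RXH_IP_SRC | RXH_IP_DST), the returned i_set keeps
 * I40E_L3_SRC_MASK and I40E_L3_DST_MASK set but clears I40E_L4_SRC_MASK and
 * I40E_L4_DST_MASK, i.e. hash on the IPv4 addresses only.
 */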
3479 /**
3480 * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
3481 * @pf: pointer to the physical function struct
3482 * @nfc: ethtool rxnfc command
3483 *
3484 * Returns Success if the flow input set is supported.
3485 **/
3486 static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
3487 {
3488 struct i40e_hw *hw = &pf->hw;
3489 u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
3490 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
3491 u8 flow_pctype = 0;
3492 u64 i_set, i_setc;
3493
3494 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3495 dev_err(&pf->pdev->dev,
3496 "Change of RSS hash input set is not supported when MFP mode is enabled\n");
3497 return -EOPNOTSUPP;
3498 }
3499
3500 /* RSS does not support anything other than hashing
3501 * to queues on src and dst IPs and ports
3502 */
3503 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3504 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3505 return -EINVAL;
3506
3507 switch (nfc->flow_type) {
3508 case TCP_V4_FLOW:
3509 flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
3510 if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
3511 hena |=
3512 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
3513 break;
3514 case TCP_V6_FLOW:
3515 flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
3516 if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
3517 hena |=
3518 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
3519 if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
3520 hena |=
3521 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
3522 break;
3523 case UDP_V4_FLOW:
3524 flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
3525 if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
3526 hena |=
3527 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
3528 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
3529
3530 hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
3531 break;
3532 case UDP_V6_FLOW:
3533 flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
3534 if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
3535 hena |=
3536 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
3537 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
3538
3539 hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
3540 break;
3541 case AH_ESP_V4_FLOW:
3542 case AH_V4_FLOW:
3543 case ESP_V4_FLOW:
3544 case SCTP_V4_FLOW:
3545 if ((nfc->data & RXH_L4_B_0_1) ||
3546 (nfc->data & RXH_L4_B_2_3))
3547 return -EINVAL;
3548 hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3549 break;
3550 case AH_ESP_V6_FLOW:
3551 case AH_V6_FLOW:
3552 case ESP_V6_FLOW:
3553 case SCTP_V6_FLOW:
3554 if ((nfc->data & RXH_L4_B_0_1) ||
3555 (nfc->data & RXH_L4_B_2_3))
3556 return -EINVAL;
3557 hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3558 break;
3559 case IPV4_FLOW:
3560 hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3561 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
3562 break;
3563 case IPV6_FLOW:
3564 hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3565 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
3566 break;
3567 default:
3568 return -EINVAL;
3569 }
3570
3571 if (flow_pctype) {
3572 i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
3573 flow_pctype)) |
3574 ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
3575 flow_pctype)) << 32);
3576 i_set = i40e_get_rss_hash_bits(nfc, i_setc);
3577 i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype),
3578 (u32)i_set);
3579 i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype),
3580 (u32)(i_set >> 32));
3581 hena |= BIT_ULL(flow_pctype);
3582 }
3583
3584 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
3585 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3586 i40e_flush(hw);
3587
3588 return 0;
3589 }
3590
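/* Illustration (hypothetical command): "ethtool -N eth0 rx-flow-hash udp4 sdfn"
 * requests 4-tuple hashing for UDP over IPv4. That maps to nfc->data =
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3, so this function
 * updates the UDP/IPv4 hash input set registers and enables the corresponding
 * PCTYPE bits in PFQF_HENA.
 */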
3591 /**
3592 * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
3593 * @vsi: Pointer to the targeted VSI
3594 * @input: The filter to update or NULL to indicate deletion
3595 * @sw_idx: Software index to the filter
3596 * @cmd: The command to get or set Rx flow classification rules
3597 *
3598 * This function updates (or deletes) a Flow Director entry from
3599 * the hlist of the corresponding PF
3600 *
3601 * Returns 0 on success
3602 **/
3603 static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
3604 struct i40e_fdir_filter *input,
3605 u16 sw_idx,
3606 struct ethtool_rxnfc *cmd)
3607 {
3608 struct i40e_fdir_filter *rule, *parent;
3609 struct i40e_pf *pf = vsi->back;
3610 struct hlist_node *node2;
3611 int err = -EINVAL;
3612
3613 parent = NULL;
3614 rule = NULL;
3615
3616 hlist_for_each_entry_safe(rule, node2,
3617 &pf->fdir_filter_list, fdir_node) {
3618 /* hash found, or no matching entry */
3619 if (rule->fd_id >= sw_idx)
3620 break;
3621 parent = rule;
3622 }
3623
3624 /* if there is an old rule occupying our place remove it */
3625 if (rule && (rule->fd_id == sw_idx)) {
3626 /* Remove this rule, since we're either deleting it, or
3627 * replacing it.
3628 */
3629 err = i40e_add_del_fdir(vsi, rule, false);
3630 hlist_del(&rule->fdir_node);
3631 kfree(rule);
3632 pf->fdir_pf_active_filters--;
3633 }
3634
3635 /* If we weren't given an input, this is a delete, so just return the
3636 * error code indicating if there was an entry at the requested slot
3637 */
3638 if (!input)
3639 return err;
3640
3641 /* Otherwise, install the new rule as requested */
3642 INIT_HLIST_NODE(&input->fdir_node);
3643
3644 /* add filter to the list */
3645 if (parent)
3646 hlist_add_behind(&input->fdir_node, &parent->fdir_node);
3647 else
3648 hlist_add_head(&input->fdir_node,
3649 &pf->fdir_filter_list);
3650
3651 /* update counts */
3652 pf->fdir_pf_active_filters++;
3653
3654 return 0;
3655 }
3656
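/* Example (illustrative): i40e_del_fdir_entry() below calls this with
 * input == NULL; if a filter with fd_id == sw_idx exists it is torn down and
 * the result of the hardware delete is returned, otherwise -EINVAL indicates
 * that no filter occupied the requested slot.
 */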
3657 /**
3658 * i40e_prune_flex_pit_list - Cleanup unused entries in FLX_PIT table
3659 * @pf: pointer to PF structure
3660 *
3661 * This function searches the list of filters and determines which FLX_PIT
3662 * entries are still required. It will prune any entries which are no longer
3663 * in use after the deletion.
3664 **/
3665 static void i40e_prune_flex_pit_list(struct i40e_pf *pf)
3666 {
3667 struct i40e_flex_pit *entry, *tmp;
3668 struct i40e_fdir_filter *rule;
3669
3670 /* First, we'll check the l3 table */
3671 list_for_each_entry_safe(entry, tmp, &pf->l3_flex_pit_list, list) {
3672 bool found = false;
3673
3674 hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) {
3675 if (rule->flow_type != IP_USER_FLOW)
3676 continue;
3677 if (rule->flex_filter &&
3678 rule->flex_offset == entry->src_offset) {
3679 found = true;
3680 break;
3681 }
3682 }
3683
3684 /* If we didn't find the filter, then we can prune this entry
3685 * from the list.
3686 */
3687 if (!found) {
3688 list_del(&entry->list);
3689 kfree(entry);
3690 }
3691 }
3692
3693 /* Followed by the L4 table */
3694 list_for_each_entry_safe(entry, tmp, &pf->l4_flex_pit_list, list) {
3695 bool found = false;
3696
3697 hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) {
3698 /* Skip this filter if it's L3, since we already
3699 * checked those in the above loop
3700 */
3701 if (rule->flow_type == IP_USER_FLOW)
3702 continue;
3703 if (rule->flex_filter &&
3704 rule->flex_offset == entry->src_offset) {
3705 found = true;
3706 break;
3707 }
3708 }
3709
3710 /* If we didn't find the filter, then we can prune this entry
3711 * from the list.
3712 */
3713 if (!found) {
3714 list_del(&entry->list);
3715 kfree(entry);
3716 }
3717 }
3718 }
3719
3720 /**
3721 * i40e_del_fdir_entry - Deletes a Flow Director filter entry
3722 * @vsi: Pointer to the targeted VSI
3723 * @cmd: The command to get or set Rx flow classification rules
3724 *
3725 * The function removes a Flow Director filter entry from the
3726 * hlist of the corresponding PF
3727 *
3728 * Returns 0 on success
3729 */
3730 static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
3731 struct ethtool_rxnfc *cmd)
3732 {
3733 struct ethtool_rx_flow_spec *fsp =
3734 (struct ethtool_rx_flow_spec *)&cmd->fs;
3735 struct i40e_pf *pf = vsi->back;
3736 int ret = 0;
3737
3738 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
3739 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
3740 return -EBUSY;
3741
3742 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
3743 return -EBUSY;
3744
3745 ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
3746
3747 i40e_prune_flex_pit_list(pf);
3748
3749 i40e_fdir_check_and_reenable(pf);
3750 return ret;
3751 }
3752
3753 /**
3754 * i40e_unused_pit_index - Find an unused PIT index for given list
3755 * @pf: the PF data structure
3756 *
3757 * Find the first unused flexible PIT index entry. We search both the L3 and
3758 * L4 flexible PIT lists so that the returned index is unique and unused by
3759 * either currently programmed L3 or L4 filters. We use a bit field as storage
3760 * to track which indexes are already used.
3761 **/
3762 static u8 i40e_unused_pit_index(struct i40e_pf *pf)
3763 {
3764 unsigned long available_index = 0xFF;
3765 struct i40e_flex_pit *entry;
3766
3767 /* We need to make sure that the new index isn't in use by either L3
3768 * or L4 filters so that IP_USER_FLOW filters can program both L3 and
3769 * L4 to use the same index.
3770 */
3771
3772 list_for_each_entry(entry, &pf->l4_flex_pit_list, list)
3773 clear_bit(entry->pit_index, &available_index);
3774
3775 list_for_each_entry(entry, &pf->l3_flex_pit_list, list)
3776 clear_bit(entry->pit_index, &available_index);
3777
3778 return find_first_bit(&available_index, 8);
3779 }
3780
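/* Worked example (illustrative): if the L3 list already uses pit_index 0 and
 * the L4 list uses pit_index 1, available_index starts as 0xFF, bits 0 and 1
 * are cleared, and find_first_bit() returns 2 as the next free PIT index.
 */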
3781 /**
3782 * i40e_find_flex_offset - Find an existing flex src_offset
3783 * @flex_pit_list: L3 or L4 flex PIT list
3784 * @src_offset: new src_offset to find
3785 *
3786 * Searches the flex_pit_list for an entry with the given offset and returns
3787 * it if found. If the offset is not currently programmed, returns an ERR_PTR
3788 * if there is no space to add a new offset, otherwise returns NULL.
3789 **/
3790 static
3791 struct i40e_flex_pit *i40e_find_flex_offset(struct list_head *flex_pit_list,
3792 u16 src_offset)
3793 {
3794 struct i40e_flex_pit *entry;
3795 int size = 0;
3796
3797 /* Search for the src_offset first. If we find a matching entry
3798 * already programmed, we can simply re-use it.
3799 */
3800 list_for_each_entry(entry, flex_pit_list, list) {
3801 size++;
3802 if (entry->src_offset == src_offset)
3803 return entry;
3804 }
3805
3806 /* If we haven't found an entry yet, then the provided src offset has
3807 * not yet been programmed. We will program the src offset later on,
3808 * but we need to indicate whether there is enough space to do so
3809 * here. We'll make use of ERR_PTR for this purpose.
3810 */
3811 if (size >= I40E_FLEX_PIT_TABLE_SIZE)
3812 return ERR_PTR(-ENOSPC);
3813
3814 return NULL;
3815 }
3816
3817 /**
3818 * i40e_add_flex_offset - Add src_offset to flex PIT table list
3819 * @flex_pit_list: L3 or L4 flex PIT list
3820 * @src_offset: new src_offset to add
3821 * @pit_index: the PIT index to program
3822 *
3823 * This function programs the new src_offset to the list. It is expected that
3824 * i40e_find_flex_offset has already been tried and returned NULL, indicating
3825 * that this offset is not programmed, and that the list has enough space to
3826 * store another offset.
3827 *
3828 * Returns 0 on success, and negative value on error.
3829 **/
3830 static int i40e_add_flex_offset(struct list_head *flex_pit_list,
3831 u16 src_offset,
3832 u8 pit_index)
3833 {
3834 struct i40e_flex_pit *new_pit, *entry;
3835
3836 new_pit = kzalloc(sizeof(*entry), GFP_KERNEL);
3837 if (!new_pit)
3838 return -ENOMEM;
3839
3840 new_pit->src_offset = src_offset;
3841 new_pit->pit_index = pit_index;
3842
3843 /* We need to insert this item such that the list is sorted by
3844 * src_offset in ascending order.
3845 */
3846 list_for_each_entry(entry, flex_pit_list, list) {
3847 if (new_pit->src_offset < entry->src_offset) {
3848 list_add_tail(&new_pit->list, &entry->list);
3849 return 0;
3850 }
3851
3852 /* If we found an entry with our offset already programmed we
3853 * can simply return here, after freeing the memory. However,
3854 * if the pit_index does not match we need to report an error.
3855 */
3856 if (new_pit->src_offset == entry->src_offset) {
3857 int err = 0;
3858
3859 /* If the PIT index is not the same we can't re-use
3860 * the entry, so we must report an error.
3861 */
3862 if (new_pit->pit_index != entry->pit_index)
3863 err = -EINVAL;
3864
3865 kfree(new_pit);
3866 return err;
3867 }
3868 }
3869
3870 /* If we reached here, then we haven't yet added the item. This means
3871 * that we should add the item at the end of the list.
3872 */
3873 list_add_tail(&new_pit->list, flex_pit_list);
3874 return 0;
3875 }
3876
3877 /**
3878 * __i40e_reprogram_flex_pit - Re-program specific FLX_PIT table
3879 * @pf: Pointer to the PF structure
3880 * @flex_pit_list: list of flexible src offsets in use
3881 * @flex_pit_start: index to first entry for this section of the table
3882 *
3883 * In order to handle flexible data, the hardware uses a table of values
3884 * called the FLX_PIT table. This table is used to indicate which sections of
3885 * the input correspond to what PIT index values. Unfortunately, hardware is
3886 * very restrictive about programming this table. Entries must be ordered by
3887 * src_offset in ascending order, without duplicates. Additionally, unused
3888 * entries must be set to the unused index value, and must have valid size and
3889 * length according to the src_offset ordering.
3890 *
3891 * This function will reprogram the FLX_PIT register from a book-keeping
3892 * structure that we guarantee is already ordered correctly, and has no more
3893 * than 3 entries.
3894 *
3895 * To make things easier, we only support flexible values of one word length,
3896 * rather than allowing variable length flexible values.
3897 **/
3898 static void __i40e_reprogram_flex_pit(struct i40e_pf *pf,
3899 struct list_head *flex_pit_list,
3900 int flex_pit_start)
3901 {
3902 struct i40e_flex_pit *entry = NULL;
3903 u16 last_offset = 0;
3904 int i = 0, j = 0;
3905
3906 /* First, loop over the list of flex PIT entries, and reprogram the
3907 * registers.
3908 */
3909 list_for_each_entry(entry, flex_pit_list, list) {
3910 /* We have to be careful when programming values for the
3911 * largest SRC_OFFSET value. It is possible that adding
3912 * additional empty values at the end would overflow the space
3913 * for the SRC_OFFSET in the FLX_PIT register. To avoid this,
3914 * we check here and add the empty values prior to adding the
3915 * largest value.
3916 *
3917 * To determine this, we will use a loop from i+1 to 3, which
3918 * will determine whether the unused entries would have valid
3919 * SRC_OFFSET. Note that there cannot be extra entries past
3920 * this value, because the only valid values would have been
3921 * larger than I40E_MAX_FLEX_SRC_OFFSET, and thus would not
3922 * have been added to the list in the first place.
3923 */
3924 for (j = i + 1; j < 3; j++) {
3925 u16 offset = entry->src_offset + j;
3926 int index = flex_pit_start + i;
3927 u32 value = I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED,
3928 1,
3929 offset - 3);
3930
3931 if (offset > I40E_MAX_FLEX_SRC_OFFSET) {
3932 i40e_write_rx_ctl(&pf->hw,
3933 I40E_PRTQF_FLX_PIT(index),
3934 value);
3935 i++;
3936 }
3937 }
3938
3939 /* Now, we can program the actual value into the table */
3940 i40e_write_rx_ctl(&pf->hw,
3941 I40E_PRTQF_FLX_PIT(flex_pit_start + i),
3942 I40E_FLEX_PREP_VAL(entry->pit_index + 50,
3943 1,
3944 entry->src_offset));
3945 i++;
3946 }
3947
3948 /* In order to program the last entries in the table, we need to
3949 * determine the valid offset. If the list is empty, we'll just start
3950 * with 0. Otherwise, we'll start with the last item offset and add 1.
3951 * This ensures that all entries have valid sizes. If we don't do this
3952 * correctly, the hardware will disable flexible field parsing.
3953 */
3954 if (!list_empty(flex_pit_list))
3955 last_offset = list_prev_entry(entry, list)->src_offset + 1;
3956
3957 for (; i < 3; i++, last_offset++) {
3958 i40e_write_rx_ctl(&pf->hw,
3959 I40E_PRTQF_FLX_PIT(flex_pit_start + i),
3960 I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED,
3961 1,
3962 last_offset));
3963 }
3964 }
3965
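/* Illustration (assuming a single programmed entry): with one L4 flex entry
 * at src_offset 4 using pit_index 0, the loop above programs FLX_PIT entry 0
 * with destination register 50 (pit_index + 50), size 1 and offset 4, and the
 * trailing loop fills the two remaining entries with I40E_FLEX_DEST_UNUSED at
 * offsets 5 and 6 so the table stays strictly ordered.
 */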
3966 /**
3967 * i40e_reprogram_flex_pit - Reprogram all FLX_PIT tables after input set change
3968 * @pf: pointer to the PF structure
3969 *
3970 * This function reprograms both the L3 and L4 FLX_PIT tables. See the
3971 * internal helper function for implementation details.
3972 **/
3973 static void i40e_reprogram_flex_pit(struct i40e_pf *pf)
3974 {
3975 __i40e_reprogram_flex_pit(pf, &pf->l3_flex_pit_list,
3976 I40E_FLEX_PIT_IDX_START_L3);
3977
3978 __i40e_reprogram_flex_pit(pf, &pf->l4_flex_pit_list,
3979 I40E_FLEX_PIT_IDX_START_L4);
3980
3981 /* We also need to program the L3 and L4 GLQF ORT register */
3982 i40e_write_rx_ctl(&pf->hw,
3983 I40E_GLQF_ORT(I40E_L3_GLQF_ORT_IDX),
3984 I40E_ORT_PREP_VAL(I40E_FLEX_PIT_IDX_START_L3,
3985 3, 1));
3986
3987 i40e_write_rx_ctl(&pf->hw,
3988 I40E_GLQF_ORT(I40E_L4_GLQF_ORT_IDX),
3989 I40E_ORT_PREP_VAL(I40E_FLEX_PIT_IDX_START_L4,
3990 3, 1));
3991 }
3992
3993 /**
3994 * i40e_flow_str - Converts a flow_type into a human readable string
3995 * @fsp: the flow specification
3996 *
3997 * Only the flow types we currently support are included here, and the string
3998 * value attempts to match what ethtool would use to configure this flow type.
3999 **/
4000 static const char *i40e_flow_str(struct ethtool_rx_flow_spec *fsp)
4001 {
4002 switch (fsp->flow_type & ~FLOW_EXT) {
4003 case TCP_V4_FLOW:
4004 return "tcp4";
4005 case UDP_V4_FLOW:
4006 return "udp4";
4007 case SCTP_V4_FLOW:
4008 return "sctp4";
4009 case IP_USER_FLOW:
4010 return "ip4";
4011 case TCP_V6_FLOW:
4012 return "tcp6";
4013 case UDP_V6_FLOW:
4014 return "udp6";
4015 case SCTP_V6_FLOW:
4016 return "sctp6";
4017 case IPV6_USER_FLOW:
4018 return "ip6";
4019 default:
4020 return "unknown";
4021 }
4022 }
4023
4024 /**
4025 * i40e_pit_index_to_mask - Return the FLEX mask for a given PIT index
4026 * @pit_index: PIT index to convert
4027 *
4028 * Returns the mask for a given PIT index. Will return 0 if the pit_index is
4029 * out of range.
4030 **/
4031 static u64 i40e_pit_index_to_mask(int pit_index)
4032 {
4033 switch (pit_index) {
4034 case 0:
4035 return I40E_FLEX_50_MASK;
4036 case 1:
4037 return I40E_FLEX_51_MASK;
4038 case 2:
4039 return I40E_FLEX_52_MASK;
4040 case 3:
4041 return I40E_FLEX_53_MASK;
4042 case 4:
4043 return I40E_FLEX_54_MASK;
4044 case 5:
4045 return I40E_FLEX_55_MASK;
4046 case 6:
4047 return I40E_FLEX_56_MASK;
4048 case 7:
4049 return I40E_FLEX_57_MASK;
4050 default:
4051 return 0;
4052 }
4053 }
4054
4055 /**
4056 * i40e_print_input_set - Show changes between two input sets
4057 * @vsi: the vsi being configured
4058 * @old: the old input set
4059 * @new: the new input set
4060 *
4061 * Print the difference between old and new input sets by showing which series
4062 * of words are toggled on or off. Only displays the bits we actually support
4063 * changing.
4064 **/
4065 static void i40e_print_input_set(struct i40e_vsi *vsi, u64 old, u64 new)
4066 {
4067 struct i40e_pf *pf = vsi->back;
4068 bool old_value, new_value;
4069 int i;
4070
4071 old_value = !!(old & I40E_L3_SRC_MASK);
4072 new_value = !!(new & I40E_L3_SRC_MASK);
4073 if (old_value != new_value)
4074 netif_info(pf, drv, vsi->netdev, "L3 source address: %s -> %s\n",
4075 old_value ? "ON" : "OFF",
4076 new_value ? "ON" : "OFF");
4077
4078 old_value = !!(old & I40E_L3_DST_MASK);
4079 new_value = !!(new & I40E_L3_DST_MASK);
4080 if (old_value != new_value)
4081 netif_info(pf, drv, vsi->netdev, "L3 destination address: %s -> %s\n",
4082 old_value ? "ON" : "OFF",
4083 new_value ? "ON" : "OFF");
4084
4085 old_value = !!(old & I40E_L4_SRC_MASK);
4086 new_value = !!(new & I40E_L4_SRC_MASK);
4087 if (old_value != new_value)
4088 netif_info(pf, drv, vsi->netdev, "L4 source port: %s -> %s\n",
4089 old_value ? "ON" : "OFF",
4090 new_value ? "ON" : "OFF");
4091
4092 old_value = !!(old & I40E_L4_DST_MASK);
4093 new_value = !!(new & I40E_L4_DST_MASK);
4094 if (old_value != new_value)
4095 netif_info(pf, drv, vsi->netdev, "L4 destination port: %s -> %s\n",
4096 old_value ? "ON" : "OFF",
4097 new_value ? "ON" : "OFF");
4098
4099 old_value = !!(old & I40E_VERIFY_TAG_MASK);
4100 new_value = !!(new & I40E_VERIFY_TAG_MASK);
4101 if (old_value != new_value)
4102 netif_info(pf, drv, vsi->netdev, "SCTP verification tag: %s -> %s\n",
4103 old_value ? "ON" : "OFF",
4104 new_value ? "ON" : "OFF");
4105
4106 /* Show change of flexible filter entries */
4107 for (i = 0; i < I40E_FLEX_INDEX_ENTRIES; i++) {
4108 u64 flex_mask = i40e_pit_index_to_mask(i);
4109
4110 old_value = !!(old & flex_mask);
4111 new_value = !!(new & flex_mask);
4112 if (old_value != new_value)
4113 netif_info(pf, drv, vsi->netdev, "FLEX index %d: %s -> %s\n",
4114 i,
4115 old_value ? "ON" : "OFF",
4116 new_value ? "ON" : "OFF");
4117 }
4118
4119 netif_info(pf, drv, vsi->netdev, " Current input set: %0llx\n",
4120 old);
4121 netif_info(pf, drv, vsi->netdev, "Requested input set: %0llx\n",
4122 new);
4123 }
4124
4125 /**
4126 * i40e_check_fdir_input_set - Check that a given rx_flow_spec mask is valid
4127 * @vsi: pointer to the targeted VSI
4128 * @fsp: pointer to Rx flow specification
4129 * @userdef: userdefined data from flow specification
4130 *
4131 * Ensures that a given ethtool_rx_flow_spec has a valid mask. Some support
4132 * for partial matches exists with a few limitations. First, hardware only
4133 * supports masking by word boundary (2 bytes) and not per individual bit.
4134 * Second, hardware is limited to using one mask for a flow type and cannot
4135 * use a separate mask for each filter.
4136 *
4137 * To support these limitations, if we already have a configured filter for
4138 * the specified type, this function enforces that new filters of the type
4139 * match the configured input set. Otherwise, if we do not have a filter of
4140 * the specified type, we allow the input set to be updated to match the
4141 * desired filter.
4142 *
4143 * To help administrators understand why a new filter was not accepted, we
4144 * print a diagnostic message showing how the input set would change, and we
4145 * warn that any preexisting filters of this type must be deleted first.
4146 *
4147 * Returns 0 on successful input set match, and a negative return code on
4148 * failure.
4149 **/
4150 static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
4151 struct ethtool_rx_flow_spec *fsp,
4152 struct i40e_rx_flow_userdef *userdef)
4153 {
4154 static const __be32 ipv6_full_mask[4] = {cpu_to_be32(0xffffffff),
4155 cpu_to_be32(0xffffffff), cpu_to_be32(0xffffffff),
4156 cpu_to_be32(0xffffffff)};
4157 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4158 struct ethtool_usrip6_spec *usr_ip6_spec;
4159 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4160 struct ethtool_usrip4_spec *usr_ip4_spec;
4161 struct i40e_pf *pf = vsi->back;
4162 u64 current_mask, new_mask;
4163 bool new_flex_offset = false;
4164 bool flex_l3 = false;
4165 u16 *fdir_filter_count;
4166 u16 index, src_offset = 0;
4167 u8 pit_index = 0;
4168 int err;
4169
4170 switch (fsp->flow_type & ~FLOW_EXT) {
4171 case SCTP_V4_FLOW:
4172 index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
4173 fdir_filter_count = &pf->fd_sctp4_filter_cnt;
4174 break;
4175 case TCP_V4_FLOW:
4176 index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
4177 fdir_filter_count = &pf->fd_tcp4_filter_cnt;
4178 break;
4179 case UDP_V4_FLOW:
4180 index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
4181 fdir_filter_count = &pf->fd_udp4_filter_cnt;
4182 break;
4183 case SCTP_V6_FLOW:
4184 index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
4185 fdir_filter_count = &pf->fd_sctp6_filter_cnt;
4186 break;
4187 case TCP_V6_FLOW:
4188 index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
4189 fdir_filter_count = &pf->fd_tcp6_filter_cnt;
4190 break;
4191 case UDP_V6_FLOW:
4192 index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
4193 fdir_filter_count = &pf->fd_udp6_filter_cnt;
4194 break;
4195 case IP_USER_FLOW:
4196 index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
4197 fdir_filter_count = &pf->fd_ip4_filter_cnt;
4198 flex_l3 = true;
4199 break;
4200 case IPV6_USER_FLOW:
4201 index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
4202 fdir_filter_count = &pf->fd_ip6_filter_cnt;
4203 flex_l3 = true;
4204 break;
4205 default:
4206 return -EOPNOTSUPP;
4207 }
4208
4209 /* Read the current input set from register memory. */
4210 current_mask = i40e_read_fd_input_set(pf, index);
4211 new_mask = current_mask;
4212
4213 /* Determine, if any, the required changes to the input set in order
4214 * to support the provided mask.
4215 *
4216 * Hardware only supports masking at word (2 byte) granularity and does
4217 * not support full bitwise masking. This implementation simplifies
4218 * even further and only supports fully enabled or fully disabled
4219 * masks for each field, even though we could split the ip4src and
4220 * ip4dst fields.
4221 */
4222 switch (fsp->flow_type & ~FLOW_EXT) {
4223 case SCTP_V4_FLOW:
4224 new_mask &= ~I40E_VERIFY_TAG_MASK;
4225 fallthrough;
4226 case TCP_V4_FLOW:
4227 case UDP_V4_FLOW:
4228 tcp_ip4_spec = &fsp->m_u.tcp_ip4_spec;
4229
4230 /* IPv4 source address */
4231 if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
4232 new_mask |= I40E_L3_SRC_MASK;
4233 else if (!tcp_ip4_spec->ip4src)
4234 new_mask &= ~I40E_L3_SRC_MASK;
4235 else
4236 return -EOPNOTSUPP;
4237
4238 /* IPv4 destination address */
4239 if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
4240 new_mask |= I40E_L3_DST_MASK;
4241 else if (!tcp_ip4_spec->ip4dst)
4242 new_mask &= ~I40E_L3_DST_MASK;
4243 else
4244 return -EOPNOTSUPP;
4245
4246 /* L4 source port */
4247 if (tcp_ip4_spec->psrc == htons(0xFFFF))
4248 new_mask |= I40E_L4_SRC_MASK;
4249 else if (!tcp_ip4_spec->psrc)
4250 new_mask &= ~I40E_L4_SRC_MASK;
4251 else
4252 return -EOPNOTSUPP;
4253
4254 /* L4 destination port */
4255 if (tcp_ip4_spec->pdst == htons(0xFFFF))
4256 new_mask |= I40E_L4_DST_MASK;
4257 else if (!tcp_ip4_spec->pdst)
4258 new_mask &= ~I40E_L4_DST_MASK;
4259 else
4260 return -EOPNOTSUPP;
4261
4262 /* Filtering on Type of Service is not supported. */
4263 if (tcp_ip4_spec->tos)
4264 return -EOPNOTSUPP;
4265
4266 break;
4267 case SCTP_V6_FLOW:
4268 new_mask &= ~I40E_VERIFY_TAG_MASK;
4269 fallthrough;
4270 case TCP_V6_FLOW:
4271 case UDP_V6_FLOW:
4272 tcp_ip6_spec = &fsp->m_u.tcp_ip6_spec;
4273
4274 /* Check if user provided IPv6 source address. */
4275 if (ipv6_addr_equal((struct in6_addr *)&tcp_ip6_spec->ip6src,
4276 (struct in6_addr *)&ipv6_full_mask))
4277 new_mask |= I40E_L3_V6_SRC_MASK;
4278 else if (ipv6_addr_any((struct in6_addr *)
4279 &tcp_ip6_spec->ip6src))
4280 new_mask &= ~I40E_L3_V6_SRC_MASK;
4281 else
4282 return -EOPNOTSUPP;
4283
4284 /* Check if user provided destination address. */
4285 if (ipv6_addr_equal((struct in6_addr *)&tcp_ip6_spec->ip6dst,
4286 (struct in6_addr *)&ipv6_full_mask))
4287 new_mask |= I40E_L3_V6_DST_MASK;
4288 else if (ipv6_addr_any((struct in6_addr *)
4289 &tcp_ip6_spec->ip6dst))
4290 new_mask &= ~I40E_L3_V6_DST_MASK;
4291 else
4292 return -EOPNOTSUPP;
4293
4294 /* L4 source port */
4295 if (tcp_ip6_spec->psrc == htons(0xFFFF))
4296 new_mask |= I40E_L4_SRC_MASK;
4297 else if (!tcp_ip6_spec->psrc)
4298 new_mask &= ~I40E_L4_SRC_MASK;
4299 else
4300 return -EOPNOTSUPP;
4301
4302 /* L4 destination port */
4303 if (tcp_ip6_spec->pdst == htons(0xFFFF))
4304 new_mask |= I40E_L4_DST_MASK;
4305 else if (!tcp_ip6_spec->pdst)
4306 new_mask &= ~I40E_L4_DST_MASK;
4307 else
4308 return -EOPNOTSUPP;
4309
4310 /* Filtering on Traffic Classes is not supported. */
4311 if (tcp_ip6_spec->tclass)
4312 return -EOPNOTSUPP;
4313 break;
4314 case IP_USER_FLOW:
4315 usr_ip4_spec = &fsp->m_u.usr_ip4_spec;
4316
4317 /* IPv4 source address */
4318 if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
4319 new_mask |= I40E_L3_SRC_MASK;
4320 else if (!usr_ip4_spec->ip4src)
4321 new_mask &= ~I40E_L3_SRC_MASK;
4322 else
4323 return -EOPNOTSUPP;
4324
4325 /* IPv4 destination address */
4326 if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
4327 new_mask |= I40E_L3_DST_MASK;
4328 else if (!usr_ip4_spec->ip4dst)
4329 new_mask &= ~I40E_L3_DST_MASK;
4330 else
4331 return -EOPNOTSUPP;
4332
4333 /* First 4 bytes of L4 header */
4334 if (usr_ip4_spec->l4_4_bytes == htonl(0xFFFFFFFF))
4335 new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK;
4336 else if (!usr_ip4_spec->l4_4_bytes)
4337 new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
4338 else
4339 return -EOPNOTSUPP;
4340
4341 /* Filtering on Type of Service is not supported. */
4342 if (usr_ip4_spec->tos)
4343 return -EOPNOTSUPP;
4344
4345 /* Filtering on IP version is not supported */
4346 if (usr_ip4_spec->ip_ver)
4347 return -EINVAL;
4348
4349 /* Filtering on L4 protocol is not supported */
4350 if (usr_ip4_spec->proto)
4351 return -EINVAL;
4352
4353 break;
4354 case IPV6_USER_FLOW:
4355 usr_ip6_spec = &fsp->m_u.usr_ip6_spec;
4356
4357 /* Check if user provided IPv6 source address. */
4358 if (ipv6_addr_equal((struct in6_addr *)&usr_ip6_spec->ip6src,
4359 (struct in6_addr *)&ipv6_full_mask))
4360 new_mask |= I40E_L3_V6_SRC_MASK;
4361 else if (ipv6_addr_any((struct in6_addr *)
4362 &usr_ip6_spec->ip6src))
4363 new_mask &= ~I40E_L3_V6_SRC_MASK;
4364 else
4365 return -EOPNOTSUPP;
4366
4367 /* Check if user provided destination address. */
4368 if (ipv6_addr_equal((struct in6_addr *)&usr_ip6_spec->ip6dst,
4369 (struct in6_addr *)&ipv6_full_mask))
4370 new_mask |= I40E_L3_V6_DST_MASK;
4371 else if (ipv6_addr_any((struct in6_addr *)
4372 				       &usr_ip6_spec->ip6dst))
4373 new_mask &= ~I40E_L3_V6_DST_MASK;
4374 else
4375 return -EOPNOTSUPP;
4376
4377 if (usr_ip6_spec->l4_4_bytes == htonl(0xFFFFFFFF))
4378 new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK;
4379 else if (!usr_ip6_spec->l4_4_bytes)
4380 new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
4381 else
4382 return -EOPNOTSUPP;
4383
4384 /* Filtering on Traffic class is not supported. */
4385 if (usr_ip6_spec->tclass)
4386 return -EOPNOTSUPP;
4387
4388 /* Filtering on L4 protocol is not supported */
4389 if (usr_ip6_spec->l4_proto)
4390 return -EINVAL;
4391
4392 break;
4393 default:
4394 return -EOPNOTSUPP;
4395 }
4396
4397 if (fsp->flow_type & FLOW_EXT) {
4398 		/* Allow only 802.1Q or no etype at all, since an unset
4399 		 * etype is later forced to 0x8100 (802.1Q)
4400 		 */
4401 if (fsp->h_ext.vlan_etype != htons(ETH_P_8021Q) &&
4402 fsp->h_ext.vlan_etype != 0)
4403 return -EOPNOTSUPP;
4404 if (fsp->m_ext.vlan_tci == htons(0xFFFF))
4405 new_mask |= I40E_VLAN_SRC_MASK;
4406 else
4407 new_mask &= ~I40E_VLAN_SRC_MASK;
4408 }
4409
4410 /* First, clear all flexible filter entries */
4411 new_mask &= ~I40E_FLEX_INPUT_MASK;
4412
4413 /* If we have a flexible filter, try to add this offset to the correct
4414 * flexible filter PIT list. Once finished, we can update the mask.
4415 * If the src_offset changed, we will get a new mask value which will
4416 * trigger an input set change.
4417 */
4418 if (userdef->flex_filter) {
4419 struct i40e_flex_pit *l3_flex_pit = NULL, *flex_pit = NULL;
4420
4421 /* Flexible offset must be even, since the flexible payload
4422 * must be aligned on 2-byte boundary.
4423 */
4424 if (userdef->flex_offset & 0x1) {
4425 dev_warn(&pf->pdev->dev,
4426 "Flexible data offset must be 2-byte aligned\n");
4427 return -EINVAL;
4428 }
4429
4430 src_offset = userdef->flex_offset >> 1;
4431
4432 /* FLX_PIT source offset value is only so large */
4433 if (src_offset > I40E_MAX_FLEX_SRC_OFFSET) {
4434 dev_warn(&pf->pdev->dev,
4435 "Flexible data must reside within first 64 bytes of the packet payload\n");
4436 return -EINVAL;
4437 }
4438
4439 /* See if this offset has already been programmed. If we get
4440 * an ERR_PTR, then the filter is not safe to add. Otherwise,
4441 * if we get a NULL pointer, this means we will need to add
4442 * the offset.
4443 */
4444 flex_pit = i40e_find_flex_offset(&pf->l4_flex_pit_list,
4445 src_offset);
4446 if (IS_ERR(flex_pit))
4447 return PTR_ERR(flex_pit);
4448
4449 /* IP_USER_FLOW filters match both L4 (ICMP) and L3 (unknown)
4450 * packet types, and thus we need to program both L3 and L4
4451 * flexible values. These must have identical flexible index,
4452 * as otherwise we can't correctly program the input set. So
4453 * we'll find both an L3 and L4 index and make sure they are
4454 * the same.
4455 */
4456 if (flex_l3) {
4457 l3_flex_pit =
4458 i40e_find_flex_offset(&pf->l3_flex_pit_list,
4459 src_offset);
4460 if (IS_ERR(l3_flex_pit))
4461 return PTR_ERR(l3_flex_pit);
4462
4463 if (flex_pit) {
4464 /* If we already had a matching L4 entry, we
4465 * need to make sure that the L3 entry we
4466 * obtained uses the same index.
4467 */
4468 if (l3_flex_pit) {
4469 if (l3_flex_pit->pit_index !=
4470 flex_pit->pit_index) {
4471 return -EINVAL;
4472 }
4473 } else {
4474 new_flex_offset = true;
4475 }
4476 } else {
4477 flex_pit = l3_flex_pit;
4478 }
4479 }
4480
4481 /* If we didn't find an existing flex offset, we need to
4482 * program a new one. However, we don't immediately program it
4483 * here because we will wait to program until after we check
4484 * that it is safe to change the input set.
4485 */
4486 if (!flex_pit) {
4487 new_flex_offset = true;
4488 pit_index = i40e_unused_pit_index(pf);
4489 } else {
4490 pit_index = flex_pit->pit_index;
4491 }
4492
4493 /* Update the mask with the new offset */
4494 new_mask |= i40e_pit_index_to_mask(pit_index);
4495 }
4496
4497 /* If the mask and flexible filter offsets for this filter match the
4498 * currently programmed values we don't need any input set change, so
4499 * this filter is safe to install.
4500 */
4501 if (new_mask == current_mask && !new_flex_offset)
4502 return 0;
4503
4504 netif_info(pf, drv, vsi->netdev, "Input set change requested for %s flows:\n",
4505 i40e_flow_str(fsp));
4506 i40e_print_input_set(vsi, current_mask, new_mask);
4507 if (new_flex_offset) {
4508 netif_info(pf, drv, vsi->netdev, "FLEX index %d: Offset -> %d",
4509 pit_index, src_offset);
4510 }
4511
4512 /* Hardware input sets are global across multiple ports, so even the
4513 * main port cannot change them when in MFP mode as this would impact
4514 * any filters on the other ports.
4515 */
4516 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4517 netif_err(pf, drv, vsi->netdev, "Cannot change Flow Director input sets while MFP is enabled\n");
4518 return -EOPNOTSUPP;
4519 }
4520
4521 /* This filter requires us to update the input set. However, hardware
4522 * only supports one input set per flow type, and does not support
4523 * separate masks for each filter. This means that we can only support
4524 * a single mask for all filters of a specific type.
4525 *
4526 * If we have preexisting filters, they obviously depend on the
4527 * current programmed input set. Display a diagnostic message in this
4528 * case explaining why the filter could not be accepted.
4529 */
4530 if (*fdir_filter_count) {
4531 netif_err(pf, drv, vsi->netdev, "Cannot change input set for %s flows until %d preexisting filters are removed\n",
4532 i40e_flow_str(fsp),
4533 *fdir_filter_count);
4534 return -EOPNOTSUPP;
4535 }
4536
4537 i40e_write_fd_input_set(pf, index, new_mask);
4538
4539 /* IP_USER_FLOW filters match both IPv4/Other and IPv4/Fragmented
4540 * frames. If we're programming the input set for IPv4/Other, we also
4541 * need to program the IPv4/Fragmented input set. Since we don't have
4542 * separate support, we'll always assume and enforce that the two flow
4543 * types must have matching input sets.
4544 */
4545 if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)
4546 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
4547 new_mask);
4548
4549 /* Add the new offset and update table, if necessary */
4550 if (new_flex_offset) {
4551 err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset,
4552 pit_index);
4553 if (err)
4554 return err;
4555
4556 if (flex_l3) {
4557 err = i40e_add_flex_offset(&pf->l3_flex_pit_list,
4558 src_offset,
4559 pit_index);
4560 if (err)
4561 return err;
4562 }
4563
4564 i40e_reprogram_flex_pit(pf);
4565 }
4566
4567 return 0;
4568 }
4569
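/* Example scenario (hypothetical commands): after installing
 * "ethtool -N eth0 flow-type tcp4 dst-port 80 action 2", the TCP/IPv4 input
 * set only matches the destination port. A later
 * "ethtool -N eth0 flow-type tcp4 src-ip 192.168.1.1 action 3" needs a
 * different input set, so the check above rejects it until the first filter
 * is deleted.
 */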
4570 /**
4571 * i40e_match_fdir_filter - Return true if two filters match
4572 * @a: pointer to filter struct
4573 * @b: pointer to filter struct
4574 *
4575 * Returns true if the two filters match exactly the same criteria. I.e. they
4576 * match the same flow type and have the same parameters. We don't need to
4577 * check any input-set since all filters of the same flow type must use the
4578 * same input set.
4579 **/
4580 static bool i40e_match_fdir_filter(struct i40e_fdir_filter *a,
4581 struct i40e_fdir_filter *b)
4582 {
4583 	/* The filters do not match if any of these criteria differ. */
4584 if (a->dst_ip != b->dst_ip ||
4585 a->src_ip != b->src_ip ||
4586 a->dst_port != b->dst_port ||
4587 a->src_port != b->src_port ||
4588 a->flow_type != b->flow_type ||
4589 a->ipl4_proto != b->ipl4_proto ||
4590 a->vlan_tag != b->vlan_tag ||
4591 a->vlan_etype != b->vlan_etype)
4592 return false;
4593
4594 return true;
4595 }
4596
4597 /**
4598 * i40e_disallow_matching_filters - Check that new filters differ
4599 * @vsi: pointer to the targeted VSI
4600 * @input: new filter to check
4601 *
4602 * Due to hardware limitations, it is not possible for two filters that match
4603 * similar criteria to be programmed at the same time. This is true for a few
4604 * reasons:
4605 *
4606 * (a) all filters matching a particular flow type must use the same input
4607 * set, that is they must match the same criteria.
4608 * (b) different flow types will never match the same packet, as the flow type
4609 * is decided by hardware before checking which rules apply.
4610 * (c) hardware has no way to distinguish which order filters apply in.
4611 *
4612 * Due to this, we can't really support using the location data to order
4613 * filters in the hardware parsing. It is technically possible for the user to
4614 * request two filters matching the same criteria but which select different
4615 * queues. In this case, rather than keep both filters in the list, we reject
4616 * the 2nd filter when the user requests adding it.
4617 *
4618 * This avoids needing to track location for programming the filter to
4619 * hardware, and ensures that we avoid some strange scenarios involving
4620 * deleting filters which match the same criteria.
4621 **/
4622 static int i40e_disallow_matching_filters(struct i40e_vsi *vsi,
4623 struct i40e_fdir_filter *input)
4624 {
4625 struct i40e_pf *pf = vsi->back;
4626 struct i40e_fdir_filter *rule;
4627 struct hlist_node *node2;
4628
4629 /* Loop through every filter, and check that it doesn't match */
4630 hlist_for_each_entry_safe(rule, node2,
4631 &pf->fdir_filter_list, fdir_node) {
4632 /* Don't check the filters match if they share the same fd_id,
4633 * since the new filter is actually just updating the target
4634 * of the old filter.
4635 */
4636 if (rule->fd_id == input->fd_id)
4637 continue;
4638
4639 /* If any filters match, then print a warning message to the
4640 * kernel message buffer and bail out.
4641 */
4642 if (i40e_match_fdir_filter(rule, input)) {
4643 dev_warn(&pf->pdev->dev,
4644 "Existing user defined filter %d already matches this flow.\n",
4645 rule->fd_id);
4646 return -EINVAL;
4647 }
4648 }
4649
4650 return 0;
4651 }
4652
4653 /**
4654 * i40e_add_fdir_ethtool - Add/Remove Flow Director filters
4655 * @vsi: pointer to the targeted VSI
4656 * @cmd: command to get or set RX flow classification rules
4657 *
4658 * Add Flow Director filters for a specific flow spec based on their
4659 * protocol. Returns 0 if the filters were successfully added.
4660 **/
4661 static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
4662 struct ethtool_rxnfc *cmd)
4663 {
4664 struct i40e_rx_flow_userdef userdef;
4665 struct ethtool_rx_flow_spec *fsp;
4666 struct i40e_fdir_filter *input;
4667 u16 dest_vsi = 0, q_index = 0;
4668 struct i40e_pf *pf;
4669 int ret = -EINVAL;
4670 u8 dest_ctl;
4671
4672 if (!vsi)
4673 return -EINVAL;
4674 pf = vsi->back;
4675
4676 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
4677 return -EOPNOTSUPP;
4678
4679 if (test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
4680 return -ENOSPC;
4681
4682 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
4683 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
4684 return -EBUSY;
4685
4686 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
4687 return -EBUSY;
4688
4689 fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
4690
4691 /* Parse the user-defined field */
4692 if (i40e_parse_rx_flow_user_data(fsp, &userdef))
4693 return -EINVAL;
4694
4695 /* Extended MAC field is not supported */
4696 if (fsp->flow_type & FLOW_MAC_EXT)
4697 return -EINVAL;
4698
4699 ret = i40e_check_fdir_input_set(vsi, fsp, &userdef);
4700 if (ret)
4701 return ret;
4702
4703 if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
4704 pf->hw.func_caps.fd_filters_guaranteed)) {
4705 return -EINVAL;
4706 }
4707
4708 /* ring_cookie is either the drop index, or is a mask of the queue
4709 * index and VF id we wish to target.
4710 */
4711 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
4712 dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
4713 } else {
4714 u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
4715 u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
4716
4717 if (!vf) {
4718 if (ring >= vsi->num_queue_pairs)
4719 return -EINVAL;
4720 dest_vsi = vsi->id;
4721 } else {
4722 /* VFs are zero-indexed, so we subtract one here */
4723 vf--;
4724
4725 if (vf >= pf->num_alloc_vfs)
4726 return -EINVAL;
4727 if (ring >= pf->vf[vf].num_queue_pairs)
4728 return -EINVAL;
4729 dest_vsi = pf->vf[vf].lan_vsi_id;
4730 }
4731 dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
4732 q_index = ring;
4733 }
4734
4735 input = kzalloc(sizeof(*input), GFP_KERNEL);
4736
4737 if (!input)
4738 return -ENOMEM;
4739
4740 input->fd_id = fsp->location;
4741 input->q_index = q_index;
4742 input->dest_vsi = dest_vsi;
4743 input->dest_ctl = dest_ctl;
4744 input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
4745 input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
4746 input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
4747 input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
4748 input->flow_type = fsp->flow_type & ~FLOW_EXT;
4749
4750 input->vlan_etype = fsp->h_ext.vlan_etype;
4751 if (!fsp->m_ext.vlan_etype && fsp->h_ext.vlan_tci)
4752 input->vlan_etype = cpu_to_be16(ETH_P_8021Q);
4753 if (fsp->m_ext.vlan_tci && input->vlan_etype)
4754 input->vlan_tag = fsp->h_ext.vlan_tci;
4755 if (input->flow_type == IPV6_USER_FLOW ||
4756 input->flow_type == UDP_V6_FLOW ||
4757 input->flow_type == TCP_V6_FLOW ||
4758 input->flow_type == SCTP_V6_FLOW) {
4759 /* Reverse the src and dest notion, since the HW expects them
4760 		 * to be from Tx perspective whereas the input from user is
4761 * from Rx filter view.
4762 */
4763 input->ipl4_proto = fsp->h_u.usr_ip6_spec.l4_proto;
4764 input->dst_port = fsp->h_u.tcp_ip6_spec.psrc;
4765 input->src_port = fsp->h_u.tcp_ip6_spec.pdst;
4766 memcpy(input->dst_ip6, fsp->h_u.ah_ip6_spec.ip6src,
4767 sizeof(__be32) * 4);
4768 memcpy(input->src_ip6, fsp->h_u.ah_ip6_spec.ip6dst,
4769 sizeof(__be32) * 4);
4770 } else {
4771 /* Reverse the src and dest notion, since the HW expects them
4772 		 * to be from Tx perspective whereas the input from user is
4773 * from Rx filter view.
4774 */
4775 input->ipl4_proto = fsp->h_u.usr_ip4_spec.proto;
4776 input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
4777 input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
4778 input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
4779 input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
4780 }
4781
4782 if (userdef.flex_filter) {
4783 input->flex_filter = true;
4784 input->flex_word = cpu_to_be16(userdef.flex_word);
4785 input->flex_offset = userdef.flex_offset;
4786 }
4787
4788 /* Avoid programming two filters with identical match criteria. */
4789 ret = i40e_disallow_matching_filters(vsi, input);
4790 if (ret)
4791 goto free_filter_memory;
4792
4793 /* Add the input filter to the fdir_input_list, possibly replacing
4794 * a previous filter. Do not free the input structure after adding it
4795 * to the list as this would cause a use-after-free bug.
4796 */
4797 i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
4798 ret = i40e_add_del_fdir(vsi, input, true);
4799 if (ret)
4800 goto remove_sw_rule;
4801 return 0;
4802
4803 remove_sw_rule:
4804 hlist_del(&input->fdir_node);
4805 pf->fdir_pf_active_filters--;
4806 free_filter_memory:
4807 kfree(input);
4808 return ret;
4809 }
4810
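/* Illustration (hypothetical command): a rule such as
 *
 *   ethtool -N eth0 flow-type udp4 dst-ip 192.168.10.2 dst-port 319 action 4
 *
 * arrives here as an ETHTOOL_SRXCLSRLINS request; the destination IP and port
 * are stored swapped into src_ip/src_port (Tx perspective) and the filter is
 * programmed to steer matching packets to queue 4 of the main VSI.
 */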
4811 /**
4812 * i40e_set_rxnfc - command to set RX flow classification rules
4813 * @netdev: network interface device structure
4814 * @cmd: ethtool rxnfc command
4815 *
4816 * Returns 0 on success, negative error code otherwise.
4817 **/
4818 static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
4819 {
4820 struct i40e_netdev_priv *np = netdev_priv(netdev);
4821 struct i40e_vsi *vsi = np->vsi;
4822 struct i40e_pf *pf = vsi->back;
4823 int ret = -EOPNOTSUPP;
4824
4825 switch (cmd->cmd) {
4826 case ETHTOOL_SRXFH:
4827 ret = i40e_set_rss_hash_opt(pf, cmd);
4828 break;
4829 case ETHTOOL_SRXCLSRLINS:
4830 ret = i40e_add_fdir_ethtool(vsi, cmd);
4831 break;
4832 case ETHTOOL_SRXCLSRLDEL:
4833 ret = i40e_del_fdir_entry(vsi, cmd);
4834 break;
4835 default:
4836 break;
4837 }
4838
4839 return ret;
4840 }
4841
4842 /**
4843 * i40e_max_channels - get Max number of combined channels supported
4844 * @vsi: vsi pointer
4845 **/
4846 static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
4847 {
4848 /* TODO: This code assumes DCB and FD are disabled for now. */
4849 return vsi->alloc_queue_pairs;
4850 }
4851
4852 /**
4853 * i40e_get_channels - Get the current channels enabled and max supported etc.
4854 * @dev: network interface device structure
4855 * @ch: ethtool channels structure
4856 *
4857 * We don't support separate tx and rx queues as channels. The other count
4858 * represents how many queues are being used for control. max_combined counts
4859 * how many queue pairs we can support. They may not be mapped 1 to 1 with
4860 * q_vectors since we support a lot more queue pairs than q_vectors.
4861 **/
4862 static void i40e_get_channels(struct net_device *dev,
4863 struct ethtool_channels *ch)
4864 {
4865 struct i40e_netdev_priv *np = netdev_priv(dev);
4866 struct i40e_vsi *vsi = np->vsi;
4867 struct i40e_pf *pf = vsi->back;
4868
4869 /* report maximum channels */
4870 ch->max_combined = i40e_max_channels(vsi);
4871
4872 /* report info for other vector */
4873 ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
4874 ch->max_other = ch->other_count;
4875
4876 /* Note: This code assumes DCB is disabled for now. */
4877 ch->combined_count = vsi->num_queue_pairs;
4878 }
4879
4880 /**
4881 * i40e_set_channels - Set the new channels count.
4882 * @dev: network interface device structure
4883 * @ch: ethtool channels structure
4884 *
4885 * The new channels count may not be the same as requested by the user
4886 * since it gets rounded down to a power of 2 value.
4887 **/
4888 static int i40e_set_channels(struct net_device *dev,
4889 struct ethtool_channels *ch)
4890 {
4891 const u8 drop = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
4892 struct i40e_netdev_priv *np = netdev_priv(dev);
4893 unsigned int count = ch->combined_count;
4894 struct i40e_vsi *vsi = np->vsi;
4895 struct i40e_pf *pf = vsi->back;
4896 struct i40e_fdir_filter *rule;
4897 struct hlist_node *node2;
4898 int new_count;
4899 int err = 0;
4900
4901 /* We do not support setting channels for any other VSI at present */
4902 if (vsi->type != I40E_VSI_MAIN)
4903 return -EINVAL;
4904
4905 /* We do not support setting channels via ethtool when TCs are
4906 * configured through mqprio
4907 */
4908 if (pf->flags & I40E_FLAG_TC_MQPRIO)
4909 return -EINVAL;
4910
4911 /* verify they are not requesting separate vectors */
4912 if (!count || ch->rx_count || ch->tx_count)
4913 return -EINVAL;
4914
4915 /* verify other_count has not changed */
4916 if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
4917 return -EINVAL;
4918
4919 /* verify the number of channels does not exceed hardware limits */
4920 if (count > i40e_max_channels(vsi))
4921 return -EINVAL;
4922
4923 /* verify that the number of channels does not invalidate any current
4924 * flow director rules
4925 */
4926 hlist_for_each_entry_safe(rule, node2,
4927 &pf->fdir_filter_list, fdir_node) {
4928 if (rule->dest_ctl != drop && count <= rule->q_index) {
4929 dev_warn(&pf->pdev->dev,
4930 "Existing user defined filter %d assigns flow to queue %d\n",
4931 rule->fd_id, rule->q_index);
4932 err = -EINVAL;
4933 }
4934 }
4935
4936 if (err) {
4937 dev_err(&pf->pdev->dev,
4938 "Existing filter rules must be deleted to reduce combined channel count to %d\n",
4939 count);
4940 return err;
4941 }
4942
4943 /* update feature limits from largest to smallest supported values */
4944 /* TODO: Flow director limit, DCB etc */
4945
4946 /* use rss_reconfig to rebuild with new queue count and update traffic
4947 * class queue mapping
4948 */
4949 new_count = i40e_reconfig_rss_queues(pf, count);
4950 if (new_count > 0)
4951 return 0;
4952 else
4953 return -EINVAL;
4954 }
4955
4956 /**
4957 * i40e_get_rxfh_key_size - get the RSS hash key size
4958 * @netdev: network interface device structure
4959 *
4960 * Returns the RSS hash key size.
4961 **/
4962 static u32 i40e_get_rxfh_key_size(struct net_device *netdev)
4963 {
4964 return I40E_HKEY_ARRAY_SIZE;
4965 }
4966
4967 /**
4968 * i40e_get_rxfh_indir_size - get the rx flow hash indirection table size
4969 * @netdev: network interface device structure
4970 *
4971 * Returns the table size.
4972 **/
4973 static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
4974 {
4975 return I40E_HLUT_ARRAY_SIZE;
4976 }
4977
4978 /**
4979 * i40e_get_rxfh - get the rx flow hash indirection table
4980 * @netdev: network interface device structure
4981 * @indir: indirection table
4982 * @key: hash key
4983 * @hfunc: hash function
4984 *
4985 * Reads the indirection table directly from the hardware. Returns 0 on
4986 * success.
4987 **/
4988 static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
4989 u8 *hfunc)
4990 {
4991 struct i40e_netdev_priv *np = netdev_priv(netdev);
4992 struct i40e_vsi *vsi = np->vsi;
4993 u8 *lut, *seed = NULL;
4994 int ret;
4995 u16 i;
4996
4997 if (hfunc)
4998 *hfunc = ETH_RSS_HASH_TOP;
4999
5000 if (!indir)
5001 return 0;
5002
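/* i40e_get_rss() fills both the hash key (when a buffer is supplied)
 * and the lookup table from the current device configuration.
 */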
5003 seed = key;
5004 lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
5005 if (!lut)
5006 return -ENOMEM;
5007 ret = i40e_get_rss(vsi, seed, lut, I40E_HLUT_ARRAY_SIZE);
5008 if (ret)
5009 goto out;
5010 for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
5011 indir[i] = (u32)(lut[i]);
5012
5013 out:
5014 kfree(lut);
5015
5016 return ret;
5017 }
5018
5019 /**
5020 * i40e_set_rxfh - set the rx flow hash indirection table
5021 * @netdev: network interface device structure
5022 * @indir: indirection table
5023 * @key: hash key
5024 * @hfunc: hash function to use
5025 *
5026 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
5027 * returns 0 after programming the table.
5028 **/
5029 static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
5030 const u8 *key, const u8 hfunc)
5031 {
5032 struct i40e_netdev_priv *np = netdev_priv(netdev);
5033 struct i40e_vsi *vsi = np->vsi;
5034 struct i40e_pf *pf = vsi->back;
5035 u8 *seed = NULL;
5036 u16 i;
5037
5038 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
5039 return -EOPNOTSUPP;
5040
5041 if (key) {
5042 if (!vsi->rss_hkey_user) {
5043 vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE,
5044 GFP_KERNEL);
5045 if (!vsi->rss_hkey_user)
5046 return -ENOMEM;
5047 }
5048 memcpy(vsi->rss_hkey_user, key, I40E_HKEY_ARRAY_SIZE);
5049 seed = vsi->rss_hkey_user;
5050 }
5051 if (!vsi->rss_lut_user) {
5052 vsi->rss_lut_user = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
5053 if (!vsi->rss_lut_user)
5054 return -ENOMEM;
5055 }
5056
5057 /* Each 32-bit entry in 'indir' is stored as an 8-bit LUT entry */
5058 if (indir)
5059 for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
5060 vsi->rss_lut_user[i] = (u8)(indir[i]);
5061 else
5062 i40e_fill_rss_lut(pf, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE,
5063 vsi->rss_size);
5064
5065 return i40e_config_rss(vsi, seed, vsi->rss_lut_user,
5066 I40E_HLUT_ARRAY_SIZE);
5067 }
5068
5069 /**
5070 * i40e_get_priv_flags - report device private flags
5071 * @dev: network interface device structure
5072 *
5073 * The string set count and the string set must match, with one string for
5074 * each flag returned. Add new strings for each flag to the
5075 * i40e_gstrings_priv_flags array.
5076 *
5077 * Returns a u32 bitmap of flags.
5078 **/
5079 static u32 i40e_get_priv_flags(struct net_device *dev)
5080 {
5081 struct i40e_netdev_priv *np = netdev_priv(dev);
5082 struct i40e_vsi *vsi = np->vsi;
5083 struct i40e_pf *pf = vsi->back;
5084 u32 i, j, ret_flags = 0;
5085
5086 for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
5087 const struct i40e_priv_flags *priv_flags;
5088
5089 priv_flags = &i40e_gstrings_priv_flags[i];
5090
5091 if (priv_flags->flag & pf->flags)
5092 ret_flags |= BIT(i);
5093 }
5094
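/* Global flags are owned by PF 0 and are reported after the per-PF
 * flags, hence BIT(i + j) below.
 */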
5095 if (pf->hw.pf_id != 0)
5096 return ret_flags;
5097
5098 for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
5099 const struct i40e_priv_flags *priv_flags;
5100
5101 priv_flags = &i40e_gl_gstrings_priv_flags[j];
5102
5103 if (priv_flags->flag & pf->flags)
5104 ret_flags |= BIT(i + j);
5105 }
5106
5107 return ret_flags;
5108 }
5109
5110 /**
5111 * i40e_set_priv_flags - set private flags
5112 * @dev: network interface device structure
5113 * @flags: bit flags to be set
5114 **/
5115 static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
5116 {
5117 struct i40e_netdev_priv *np = netdev_priv(dev);
5118 u64 orig_flags, new_flags, changed_flags;
5119 enum i40e_admin_queue_err adq_err;
5120 struct i40e_vsi *vsi = np->vsi;
5121 struct i40e_pf *pf = vsi->back;
5122 u32 reset_needed = 0;
5123 i40e_status status;
5124 u32 i, j;
5125
5126 orig_flags = READ_ONCE(pf->flags);
5127 new_flags = orig_flags;
5128
5129 for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
5130 const struct i40e_priv_flags *priv_flags;
5131
5132 priv_flags = &i40e_gstrings_priv_flags[i];
5133
5134 if (flags & BIT(i))
5135 new_flags |= priv_flags->flag;
5136 else
5137 new_flags &= ~(priv_flags->flag);
5138
5139 /* If this is a read-only flag, it can't be changed */
5140 if (priv_flags->read_only &&
5141 ((orig_flags ^ new_flags) & ~BIT(i)))
5142 return -EOPNOTSUPP;
5143 }
5144
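/* Only PF 0 may modify the global flags; other PFs skip them */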
5145 if (pf->hw.pf_id != 0)
5146 goto flags_complete;
5147
5148 for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
5149 const struct i40e_priv_flags *priv_flags;
5150
5151 priv_flags = &i40e_gl_gstrings_priv_flags[j];
5152
5153 if (flags & BIT(i + j))
5154 new_flags |= priv_flags->flag;
5155 else
5156 new_flags &= ~(priv_flags->flag);
5157
5158 /* If this is a read-only flag, it can't be changed */
5159 if (priv_flags->read_only &&
5160 ((orig_flags ^ new_flags) & ~BIT(i)))
5161 return -EOPNOTSUPP;
5162 }
5163
5164 flags_complete:
5165 changed_flags = orig_flags ^ new_flags;
5166
5167 if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP)
5168 reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG;
5169 if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
5170 I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED))
5171 reset_needed = BIT(__I40E_PF_RESET_REQUESTED);
5172
5173 /* Before we finalize any flag changes, we need to perform some
5174 * checks to ensure that the changes are supported and safe.
5175 */
5176
5177 /* ATR eviction is not supported on all devices */
5178 if ((new_flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) &&
5179 !(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE))
5180 return -EOPNOTSUPP;
5181
5182 /* If the driver detected FW LLDP was disabled on init, this flag could
5183 * be set, however we do not support _changing_ the flag:
5184 * - on XL710 if NPAR is enabled or FW API version < 1.7
5185 * - on X722 with FW API version < 1.6
5186 * There are situations where older FW versions/NPAR enabled PFs could
5187 * disable LLDP, however we _must_ not allow the user to enable/disable
5188 * LLDP with this flag on unsupported FW versions.
5189 */
5190 if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
5191 if (!(pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) {
5192 dev_warn(&pf->pdev->dev,
5193 "Device does not support changing FW LLDP\n");
5194 return -EOPNOTSUPP;
5195 }
5196 }
5197
5198 if (changed_flags & I40E_FLAG_RS_FEC &&
5199 pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
5200 pf->hw.device_id != I40E_DEV_ID_25G_B) {
5201 dev_warn(&pf->pdev->dev,
5202 "Device does not support changing FEC configuration\n");
5203 return -EOPNOTSUPP;
5204 }
5205
5206 if (changed_flags & I40E_FLAG_BASE_R_FEC &&
5207 pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
5208 pf->hw.device_id != I40E_DEV_ID_25G_B &&
5209 pf->hw.device_id != I40E_DEV_ID_KX_X722) {
5210 dev_warn(&pf->pdev->dev,
5211 "Device does not support changing FEC configuration\n");
5212 return -EOPNOTSUPP;
5213 }
5214
5215 /* Process any additional changes needed as a result of flag changes.
5216 * The changed_flags value reflects the list of bits that were
5217 * changed in the code above.
5218 */
5219
5220 /* Flush current ATR settings if ATR was disabled */
5221 if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) &&
5222 !(new_flags & I40E_FLAG_FD_ATR_ENABLED)) {
5223 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
5224 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
5225 }
5226
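/* Propagate the new promiscuous behaviour to the switch configuration */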
5227 if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) {
5228 u16 sw_flags = 0, valid_flags = 0;
5229 int ret;
5230
5231 if (!(new_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
5232 sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
5233 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
5234 ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
5235 0, NULL);
5236 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
5237 dev_info(&pf->pdev->dev,
5238 "couldn't set switch config bits, err %s aq_err %s\n",
5239 i40e_stat_str(&pf->hw, ret),
5240 i40e_aq_str(&pf->hw,
5241 pf->hw.aq.asq_last_status));
5242 /* not a fatal problem, just keep going */
5243 }
5244 }
5245
5246 if ((changed_flags & I40E_FLAG_RS_FEC) ||
5247 (changed_flags & I40E_FLAG_BASE_R_FEC)) {
5248 u8 fec_cfg = 0;
5249
5250 if (new_flags & I40E_FLAG_RS_FEC &&
5251 new_flags & I40E_FLAG_BASE_R_FEC) {
5252 fec_cfg = I40E_AQ_SET_FEC_AUTO;
5253 } else if (new_flags & I40E_FLAG_RS_FEC) {
5254 fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS |
5255 I40E_AQ_SET_FEC_ABILITY_RS);
5256 } else if (new_flags & I40E_FLAG_BASE_R_FEC) {
5257 fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR |
5258 I40E_AQ_SET_FEC_ABILITY_KR);
5259 }
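/* Leaving fec_cfg at 0 (neither flag set) requests that FEC be disabled */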
5260 if (i40e_set_fec_cfg(dev, fec_cfg))
5261 dev_warn(&pf->pdev->dev, "Cannot change FEC config\n");
5262 }
5263
5264 if ((changed_flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
5265 (orig_flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) {
5266 dev_err(&pf->pdev->dev,
5267 "Setting link-down-on-close not supported on this port (because total-port-shutdown is enabled)\n");
5268 return -EOPNOTSUPP;
5269 }
5270
5271 if ((changed_flags & new_flags &
5272 I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
5273 (new_flags & I40E_FLAG_MFP_ENABLED))
5274 dev_warn(&pf->pdev->dev,
5275 "Turning on link-down-on-close flag may affect other partitions\n");
5276
5277 if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
5278 if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) {
5279 #ifdef CONFIG_I40E_DCB
5280 i40e_dcb_sw_default_config(pf);
5281 #endif /* CONFIG_I40E_DCB */
5282 i40e_aq_cfg_lldp_mib_change_event(&pf->hw, false, NULL);
5283 i40e_aq_stop_lldp(&pf->hw, true, false, NULL);
5284 } else {
5285 status = i40e_aq_start_lldp(&pf->hw, false, NULL);
5286 if (status) {
5287 adq_err = pf->hw.aq.asq_last_status;
5288 switch (adq_err) {
5289 case I40E_AQ_RC_EEXIST:
5290 dev_warn(&pf->pdev->dev,
5291 "FW LLDP agent is already running\n");
5292 reset_needed = 0;
5293 break;
5294 case I40E_AQ_RC_EPERM:
5295 dev_warn(&pf->pdev->dev,
5296 "Device configuration forbids SW from starting the LLDP agent.\n");
5297 return -EINVAL;
5298 default:
5299 dev_warn(&pf->pdev->dev,
5300 "Starting FW LLDP agent failed: error: %s, %s\n",
5301 i40e_stat_str(&pf->hw,
5302 status),
5303 i40e_aq_str(&pf->hw,
5304 adq_err));
5305 return -EINVAL;
5306 }
5307 }
5308 }
5309 }
5310
5311 /* Now that we've checked to ensure that the new flags are valid, load
5312 * them into place. Since we only modify flags either (a) during
5313 * initialization or (b) while holding the RTNL lock, we don't need
5314 * anything fancy here.
5315 */
5316 pf->flags = new_flags;
5317
5318 /* Issue a reset to cause the changes to take effect. As additional bits
5319 * are added, we will need to create a mask of bits requiring a reset.
5320 */
5321 if (reset_needed)
5322 i40e_do_reset(pf, reset_needed, true);
5323
5324 return 0;
5325 }
5326
5327 /**
5328 * i40e_get_module_info - get (Q)SFP+ module type info
5329 * @netdev: network interface device structure
5330 * @modinfo: module EEPROM size and layout information structure
5331 **/
5332 static int i40e_get_module_info(struct net_device *netdev,
5333 struct ethtool_modinfo *modinfo)
5334 {
5335 struct i40e_netdev_priv *np = netdev_priv(netdev);
5336 struct i40e_vsi *vsi = np->vsi;
5337 struct i40e_pf *pf = vsi->back;
5338 struct i40e_hw *hw = &pf->hw;
5339 u32 sff8472_comp = 0;
5340 u32 sff8472_swap = 0;
5341 u32 sff8636_rev = 0;
5342 i40e_status status;
5343 u32 type = 0;
5344
5345 /* Check if firmware supports reading module EEPROM. */
5346 if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
5347 netdev_err(vsi->netdev, "Module EEPROM memory read not supported. Please update the NVM image.\n");
5348 return -EINVAL;
5349 }
5350
5351 status = i40e_update_link_info(hw);
5352 if (status)
5353 return -EIO;
5354
5355 if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
5356 netdev_err(vsi->netdev, "Cannot read module EEPROM memory. No module connected.\n");
5357 return -EINVAL;
5358 }
5359
5360 type = hw->phy.link_info.module_type[0];
5361
5362 switch (type) {
5363 case I40E_MODULE_TYPE_SFP:
5364 status = i40e_aq_get_phy_register(hw,
5365 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
5366 I40E_I2C_EEPROM_DEV_ADDR, true,
5367 I40E_MODULE_SFF_8472_COMP,
5368 &sff8472_comp, NULL);
5369 if (status)
5370 return -EIO;
5371
5372 status = i40e_aq_get_phy_register(hw,
5373 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
5374 I40E_I2C_EEPROM_DEV_ADDR, true,
5375 I40E_MODULE_SFF_8472_SWAP,
5376 &sff8472_swap, NULL);
5377 if (status)
5378 return -EIO;
5379
5380 /* Check if the module requires address swap to access
5381 * the other EEPROM memory page.
5382 */
5383 if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
5384 netdev_warn(vsi->netdev, "Module address swap to access page 0xA2 is not supported.\n");
5385 modinfo->type = ETH_MODULE_SFF_8079;
5386 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
5387 } else if (sff8472_comp == 0x00) {
5388 /* Module is not SFF-8472 compliant */
5389 modinfo->type = ETH_MODULE_SFF_8079;
5390 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
5391 } else if (!(sff8472_swap & I40E_MODULE_SFF_DDM_IMPLEMENTED)) {
5392 /* Module is SFF-8472 compliant but doesn't implement
5393 * Digital Diagnostic Monitoring (DDM).
5394 */
5395 modinfo->type = ETH_MODULE_SFF_8079;
5396 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
5397 } else {
5398 modinfo->type = ETH_MODULE_SFF_8472;
5399 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
5400 }
5401 break;
5402 case I40E_MODULE_TYPE_QSFP_PLUS:
5403 /* Read from memory page 0. */
5404 status = i40e_aq_get_phy_register(hw,
5405 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
5406 0, true,
5407 I40E_MODULE_REVISION_ADDR,
5408 &sff8636_rev, NULL);
5409 if (status)
5410 return -EIO;
5411 /* Determine revision compliance byte */
5412 if (sff8636_rev > 0x02) {
5413 /* Module is SFF-8636 compliant */
5414 modinfo->type = ETH_MODULE_SFF_8636;
5415 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
5416 } else {
5417 modinfo->type = ETH_MODULE_SFF_8436;
5418 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
5419 }
5420 break;
5421 case I40E_MODULE_TYPE_QSFP28:
5422 modinfo->type = ETH_MODULE_SFF_8636;
5423 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
5424 break;
5425 default:
5426 netdev_err(vsi->netdev, "Module type unrecognized\n");
5427 return -EINVAL;
5428 }
5429 return 0;
5430 }
5431
5432 /**
5433 * i40e_get_module_eeprom - fills buffer with (Q)SFP+ module memory contents
5434 * @netdev: network interface device structure
5435 * @ee: EEPROM dump request structure
5436 * @data: buffer to be filled with EEPROM contents
5437 **/
5438 static int i40e_get_module_eeprom(struct net_device *netdev,
5439 struct ethtool_eeprom *ee,
5440 u8 *data)
5441 {
5442 struct i40e_netdev_priv *np = netdev_priv(netdev);
5443 struct i40e_vsi *vsi = np->vsi;
5444 struct i40e_pf *pf = vsi->back;
5445 struct i40e_hw *hw = &pf->hw;
5446 bool is_sfp = false;
5447 i40e_status status;
5448 u32 value = 0;
5449 int i;
5450
5451 if (!ee || !ee->len || !data)
5452 return -EINVAL;
5453
5454 if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
5455 is_sfp = true;
5456
5457 for (i = 0; i < ee->len; i++) {
5458 u32 offset = i + ee->offset;
5459 u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
5460
5461 /* Check if we need to access the other memory page */
5462 if (is_sfp) {
5463 if (offset >= ETH_MODULE_SFF_8079_LEN) {
5464 offset -= ETH_MODULE_SFF_8079_LEN;
5465 addr = I40E_I2C_EEPROM_DEV_ADDR2;
5466 }
5467 } else {
5468 while (offset >= ETH_MODULE_SFF_8436_LEN) {
5469 /* Compute memory page number and offset. */
5470 offset -= ETH_MODULE_SFF_8436_LEN / 2;
5471 addr++;
5472 }
5473 }
5474
5475 status = i40e_aq_get_phy_register(hw,
5476 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
5477 addr, true, offset, &value, NULL);
5478 if (status)
5479 return -EIO;
5480 data[i] = value;
5481 }
5482 return 0;
5483 }
5484
5485 static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
5486 {
5487 struct i40e_netdev_priv *np = netdev_priv(netdev);
5488 struct i40e_aq_get_phy_abilities_resp phy_cfg;
5489 enum i40e_status_code status = 0;
5490 struct i40e_vsi *vsi = np->vsi;
5491 struct i40e_pf *pf = vsi->back;
5492 struct i40e_hw *hw = &pf->hw;
5493
5494 /* Get initial PHY capabilities */
5495 status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL);
5496 if (status)
5497 return -EAGAIN;
5498
5499 /* Check whether NIC configuration is compatible with Energy Efficient
5500 * Ethernet (EEE) mode.
5501 */
5502 if (phy_cfg.eee_capability == 0)
5503 return -EOPNOTSUPP;
5504
5505 edata->supported = SUPPORTED_Autoneg;
5506 edata->lp_advertised = edata->supported;
5507
5508 /* Get current configuration */
5509 status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL);
5510 if (status)
5511 return -EAGAIN;
5512
5513 edata->advertised = phy_cfg.eee_capability ? SUPPORTED_Autoneg : 0U;
5514 edata->eee_enabled = !!edata->advertised;
5515 edata->tx_lpi_enabled = pf->stats.tx_lpi_status;
5516
5517 edata->eee_active = pf->stats.tx_lpi_status && pf->stats.rx_lpi_status;
5518
5519 return 0;
5520 }
5521
5522 static int i40e_is_eee_param_supported(struct net_device *netdev,
5523 struct ethtool_eee *edata)
5524 {
5525 struct i40e_netdev_priv *np = netdev_priv(netdev);
5526 struct i40e_vsi *vsi = np->vsi;
5527 struct i40e_pf *pf = vsi->back;
5528 struct i40e_ethtool_not_used {
5529 u32 value;
5530 const char *name;
5531 } param[] = {
5532 {edata->advertised & ~SUPPORTED_Autoneg, "advertise"},
5533 {edata->tx_lpi_timer, "tx-timer"},
5534 {edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"}
5535 };
5536 int i;
5537
5538 for (i = 0; i < ARRAY_SIZE(param); i++) {
5539 if (param[i].value) {
5540 netdev_info(netdev,
5541 "EEE setting %s not supported\n",
5542 param[i].name);
5543 return -EOPNOTSUPP;
5544 }
5545 }
5546
5547 return 0;
5548 }
5549
5550 static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
5551 {
5552 struct i40e_netdev_priv *np = netdev_priv(netdev);
5553 struct i40e_aq_get_phy_abilities_resp abilities;
5554 enum i40e_status_code status = I40E_SUCCESS;
5555 struct i40e_aq_set_phy_config config;
5556 struct i40e_vsi *vsi = np->vsi;
5557 struct i40e_pf *pf = vsi->back;
5558 struct i40e_hw *hw = &pf->hw;
5559 __le16 eee_capability;
5560
5561 /* Deny parameters we don't support */
5562 if (i40e_is_eee_param_supported(netdev, edata))
5563 return -EOPNOTSUPP;
5564
5565 /* Get initial PHY capabilities */
5566 status = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
5567 NULL);
5568 if (status)
5569 return -EAGAIN;
5570
5571 /* Check whether NIC configuration is compatible with Energy Efficient
5572 * Ethernet (EEE) mode.
5573 */
5574 if (abilities.eee_capability == 0)
5575 return -EOPNOTSUPP;
5576
5577 /* Cache initial EEE capability */
5578 eee_capability = abilities.eee_capability;
5579
5580 /* Get current PHY configuration */
5581 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
5582 NULL);
5583 if (status)
5584 return -EAGAIN;
5585
5586 /* Cache current PHY configuration */
5587 config.phy_type = abilities.phy_type;
5588 config.phy_type_ext = abilities.phy_type_ext;
5589 config.link_speed = abilities.link_speed;
5590 config.abilities = abilities.abilities |
5591 I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
5592 config.eeer = abilities.eeer_val;
5593 config.low_power_ctrl = abilities.d3_lpan;
5594 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
5595 I40E_AQ_PHY_FEC_CONFIG_MASK;
5596
5597 /* Set desired EEE state */
5598 if (edata->eee_enabled) {
5599 config.eee_capability = eee_capability;
5600 config.eeer |= cpu_to_le32(I40E_PRTPM_EEER_TX_LPI_EN_MASK);
5601 } else {
5602 config.eee_capability = 0;
5603 config.eeer &= cpu_to_le32(~I40E_PRTPM_EEER_TX_LPI_EN_MASK);
5604 }
5605
5606 /* Apply modified PHY configuration */
5607 status = i40e_aq_set_phy_config(hw, &config, NULL);
5608 if (status)
5609 return -EAGAIN;
5610
5611 return 0;
5612 }
5613
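/* In recovery mode only a minimal set of operations is exposed: driver
 * info and EEPROM access, enough to flash a new NVM image.
 */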
5614 static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
5615 .get_drvinfo = i40e_get_drvinfo,
5616 .set_eeprom = i40e_set_eeprom,
5617 .get_eeprom_len = i40e_get_eeprom_len,
5618 .get_eeprom = i40e_get_eeprom,
5619 };
5620
5621 static const struct ethtool_ops i40e_ethtool_ops = {
5622 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
5623 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
5624 ETHTOOL_COALESCE_USE_ADAPTIVE |
5625 ETHTOOL_COALESCE_RX_USECS_HIGH |
5626 ETHTOOL_COALESCE_TX_USECS_HIGH,
5627 .get_drvinfo = i40e_get_drvinfo,
5628 .get_regs_len = i40e_get_regs_len,
5629 .get_regs = i40e_get_regs,
5630 .nway_reset = i40e_nway_reset,
5631 .get_link = ethtool_op_get_link,
5632 .get_wol = i40e_get_wol,
5633 .set_wol = i40e_set_wol,
5634 .set_eeprom = i40e_set_eeprom,
5635 .get_eeprom_len = i40e_get_eeprom_len,
5636 .get_eeprom = i40e_get_eeprom,
5637 .get_ringparam = i40e_get_ringparam,
5638 .set_ringparam = i40e_set_ringparam,
5639 .get_pauseparam = i40e_get_pauseparam,
5640 .set_pauseparam = i40e_set_pauseparam,
5641 .get_msglevel = i40e_get_msglevel,
5642 .set_msglevel = i40e_set_msglevel,
5643 .get_rxnfc = i40e_get_rxnfc,
5644 .set_rxnfc = i40e_set_rxnfc,
5645 .self_test = i40e_diag_test,
5646 .get_strings = i40e_get_strings,
5647 .get_eee = i40e_get_eee,
5648 .set_eee = i40e_set_eee,
5649 .set_phys_id = i40e_set_phys_id,
5650 .get_sset_count = i40e_get_sset_count,
5651 .get_ethtool_stats = i40e_get_ethtool_stats,
5652 .get_coalesce = i40e_get_coalesce,
5653 .set_coalesce = i40e_set_coalesce,
5654 .get_rxfh_key_size = i40e_get_rxfh_key_size,
5655 .get_rxfh_indir_size = i40e_get_rxfh_indir_size,
5656 .get_rxfh = i40e_get_rxfh,
5657 .set_rxfh = i40e_set_rxfh,
5658 .get_channels = i40e_get_channels,
5659 .set_channels = i40e_set_channels,
5660 .get_module_info = i40e_get_module_info,
5661 .get_module_eeprom = i40e_get_module_eeprom,
5662 .get_ts_info = i40e_get_ts_info,
5663 .get_priv_flags = i40e_get_priv_flags,
5664 .set_priv_flags = i40e_set_priv_flags,
5665 .get_per_queue_coalesce = i40e_get_per_queue_coalesce,
5666 .set_per_queue_coalesce = i40e_set_per_queue_coalesce,
5667 .get_link_ksettings = i40e_get_link_ksettings,
5668 .set_link_ksettings = i40e_set_link_ksettings,
5669 .get_fecparam = i40e_get_fec_param,
5670 .set_fecparam = i40e_set_fec_param,
5671 .flash_device = i40e_ddp_flash,
5672 };
5673
5674 void i40e_set_ethtool_ops(struct net_device *netdev)
5675 {
5676 struct i40e_netdev_priv *np = netdev_priv(netdev);
5677 struct i40e_pf *pf = np->vsi->back;
5678
5679 if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
5680 netdev->ethtool_ops = &i40e_ethtool_ops;
5681 else
5682 netdev->ethtool_ops = &i40e_ethtool_recovery_mode_ops;
5683 }
5684