1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
3 
4 #include <linux/pci.h>
5 #include <linux/phy.h>
6 #include <linux/ethtool.h>
7 
8 #include "wx_type.h"
9 #include "wx_ethtool.h"
10 #include "wx_hw.h"
11 #include "wx_lib.h"
12 
/* One entry of the ethtool statistics table: the name reported to
 * userspace plus the size and byte offset of the matching counter
 * inside struct wx (filled in by the WX_STAT() macro below).
 */
struct wx_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name shown by "ethtool -S" */
	size_t sizeof_stat;			/* counter width: sizeof(u64) or sizeof(u32) */
	off_t stat_offset;			/* byte offset of the counter within struct wx */
};
18 
/* Build a wx_stats entry for member @m of struct wx, exported as @str */
#define WX_STAT(str, m) { \
		.stat_string = str, \
		.sizeof_stat = sizeof(((struct wx *)0)->m), \
		.stat_offset = offsetof(struct wx, m) }
23 
/* Table of adapter-global statistics exported via "ethtool -S"; the
 * per-queue packets/bytes counters are appended at runtime by
 * wx_get_strings()/wx_get_ethtool_stats().
 */
static const struct wx_stats wx_gstrings_stats[] = {
	WX_STAT("rx_dma_pkts", stats.gprc),
	WX_STAT("tx_dma_pkts", stats.gptc),
	WX_STAT("rx_dma_bytes", stats.gorc),
	WX_STAT("tx_dma_bytes", stats.gotc),
	WX_STAT("rx_total_pkts", stats.tpr),
	WX_STAT("tx_total_pkts", stats.tpt),
	WX_STAT("rx_long_length_count", stats.roc),
	WX_STAT("rx_short_length_count", stats.ruc),
	WX_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
	WX_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
	WX_STAT("os2bmc_tx_by_host", stats.o2bspc),
	WX_STAT("os2bmc_rx_by_host", stats.b2ogprc),
	WX_STAT("rx_no_dma_resources", stats.rdmdrop),
	WX_STAT("tx_busy", tx_busy),
	WX_STAT("non_eop_descs", non_eop_descs),
	WX_STAT("tx_restart_queue", restart_queue),
	WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good),
	WX_STAT("rx_csum_offload_errors", hw_csum_rx_error),
	WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
};
45 
/* The driver allocates num_tx_queues and num_rx_queues symmetrically, so
 * we let WX_NUM_RX_QUEUES evaluate to num_tx_queues. This is used because
 * there is no good way to get the max number of rx queues with CONFIG_RPS
 * disabled. Both macros expand a local "netdev" pointer and may only be
 * used inside functions that have one in scope.
 */
#define WX_NUM_RX_QUEUES netdev->num_tx_queues
#define WX_NUM_TX_QUEUES netdev->num_tx_queues

/* number of u64 entries contributed by the per-queue counters */
#define WX_QUEUE_STATS_LEN ( \
		(WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \
		(sizeof(struct wx_queue_stats) / sizeof(u64)))
/* number of entries in the global statistics table above */
#define WX_GLOBAL_STATS_LEN  ARRAY_SIZE(wx_gstrings_stats)
/* total count reported for ETH_SS_STATS */
#define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN)
59 
60 int wx_get_sset_count(struct net_device *netdev, int sset)
61 {
62 	switch (sset) {
63 	case ETH_SS_STATS:
64 		return WX_STATS_LEN;
65 	default:
66 		return -EOPNOTSUPP;
67 	}
68 }
69 EXPORT_SYMBOL(wx_get_sset_count);
70 
71 void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
72 {
73 	u8 *p = data;
74 	int i;
75 
76 	switch (stringset) {
77 	case ETH_SS_STATS:
78 		for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
79 			ethtool_puts(&p, wx_gstrings_stats[i].stat_string);
80 		for (i = 0; i < netdev->num_tx_queues; i++) {
81 			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
82 			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
83 		}
84 		for (i = 0; i < WX_NUM_RX_QUEUES; i++) {
85 			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
86 			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
87 		}
88 		break;
89 	}
90 }
91 EXPORT_SYMBOL(wx_get_strings);
92 
93 void wx_get_ethtool_stats(struct net_device *netdev,
94 			  struct ethtool_stats *stats, u64 *data)
95 {
96 	struct wx *wx = netdev_priv(netdev);
97 	struct wx_ring *ring;
98 	unsigned int start;
99 	int i, j;
100 	char *p;
101 
102 	wx_update_stats(wx);
103 
104 	for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) {
105 		p = (char *)wx + wx_gstrings_stats[i].stat_offset;
106 		data[i] = (wx_gstrings_stats[i].sizeof_stat ==
107 			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
108 	}
109 
110 	for (j = 0; j < netdev->num_tx_queues; j++) {
111 		ring = wx->tx_ring[j];
112 		if (!ring) {
113 			data[i++] = 0;
114 			data[i++] = 0;
115 			continue;
116 		}
117 
118 		do {
119 			start = u64_stats_fetch_begin(&ring->syncp);
120 			data[i] = ring->stats.packets;
121 			data[i + 1] = ring->stats.bytes;
122 		} while (u64_stats_fetch_retry(&ring->syncp, start));
123 		i += 2;
124 	}
125 	for (j = 0; j < WX_NUM_RX_QUEUES; j++) {
126 		ring = wx->rx_ring[j];
127 		if (!ring) {
128 			data[i++] = 0;
129 			data[i++] = 0;
130 			continue;
131 		}
132 
133 		do {
134 			start = u64_stats_fetch_begin(&ring->syncp);
135 			data[i] = ring->stats.packets;
136 			data[i + 1] = ring->stats.bytes;
137 		} while (u64_stats_fetch_retry(&ring->syncp, start));
138 		i += 2;
139 	}
140 }
141 EXPORT_SYMBOL(wx_get_ethtool_stats);
142 
143 void wx_get_mac_stats(struct net_device *netdev,
144 		      struct ethtool_eth_mac_stats *mac_stats)
145 {
146 	struct wx *wx = netdev_priv(netdev);
147 	struct wx_hw_stats *hwstats;
148 
149 	wx_update_stats(wx);
150 
151 	hwstats = &wx->stats;
152 	mac_stats->MulticastFramesXmittedOK = hwstats->mptc;
153 	mac_stats->BroadcastFramesXmittedOK = hwstats->bptc;
154 	mac_stats->MulticastFramesReceivedOK = hwstats->mprc;
155 	mac_stats->BroadcastFramesReceivedOK = hwstats->bprc;
156 }
157 EXPORT_SYMBOL(wx_get_mac_stats);
158 
159 void wx_get_pause_stats(struct net_device *netdev,
160 			struct ethtool_pause_stats *stats)
161 {
162 	struct wx *wx = netdev_priv(netdev);
163 	struct wx_hw_stats *hwstats;
164 
165 	wx_update_stats(wx);
166 
167 	hwstats = &wx->stats;
168 	stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
169 	stats->rx_pause_frames = hwstats->lxonoffrxc;
170 }
171 EXPORT_SYMBOL(wx_get_pause_stats);
172 
/**
 * wx_get_drvinfo - report driver identification strings and stat count
 * @netdev: network interface device structure
 * @info: structure to fill with driver name, fw version, bus info and
 *	  the number of statistics
 */
void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
	struct wx *wx = netdev_priv(netdev);

	strscpy(info->driver, wx->driver_name, sizeof(info->driver));
	strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version));
	strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info));
	if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) {
		/* WX_STATS_LEN counts netdev->num_tx_queues tx AND rx
		 * queue pairs; each inactive queue removes one tx and one
		 * rx pair, hence the final "* 2" on the per-queue size
		 */
		info->n_stats = WX_STATS_LEN -
				   (WX_NUM_TX_QUEUES - wx->num_tx_queues) *
				   (sizeof(struct wx_queue_stats) / sizeof(u64)) * 2;
	} else {
		info->n_stats = WX_STATS_LEN;
	}
}
EXPORT_SYMBOL(wx_get_drvinfo);
189 
190 int wx_nway_reset(struct net_device *netdev)
191 {
192 	struct wx *wx = netdev_priv(netdev);
193 
194 	return phylink_ethtool_nway_reset(wx->phylink);
195 }
196 EXPORT_SYMBOL(wx_nway_reset);
197 
198 int wx_get_link_ksettings(struct net_device *netdev,
199 			  struct ethtool_link_ksettings *cmd)
200 {
201 	struct wx *wx = netdev_priv(netdev);
202 
203 	return phylink_ethtool_ksettings_get(wx->phylink, cmd);
204 }
205 EXPORT_SYMBOL(wx_get_link_ksettings);
206 
207 int wx_set_link_ksettings(struct net_device *netdev,
208 			  const struct ethtool_link_ksettings *cmd)
209 {
210 	struct wx *wx = netdev_priv(netdev);
211 
212 	return phylink_ethtool_ksettings_set(wx->phylink, cmd);
213 }
214 EXPORT_SYMBOL(wx_set_link_ksettings);
215 
216 void wx_get_pauseparam(struct net_device *netdev,
217 		       struct ethtool_pauseparam *pause)
218 {
219 	struct wx *wx = netdev_priv(netdev);
220 
221 	phylink_ethtool_get_pauseparam(wx->phylink, pause);
222 }
223 EXPORT_SYMBOL(wx_get_pauseparam);
224 
225 int wx_set_pauseparam(struct net_device *netdev,
226 		      struct ethtool_pauseparam *pause)
227 {
228 	struct wx *wx = netdev_priv(netdev);
229 
230 	return phylink_ethtool_set_pauseparam(wx->phylink, pause);
231 }
232 EXPORT_SYMBOL(wx_set_pauseparam);
233 
234 void wx_get_ringparam(struct net_device *netdev,
235 		      struct ethtool_ringparam *ring,
236 		      struct kernel_ethtool_ringparam *kernel_ring,
237 		      struct netlink_ext_ack *extack)
238 {
239 	struct wx *wx = netdev_priv(netdev);
240 
241 	ring->rx_max_pending = WX_MAX_RXD;
242 	ring->tx_max_pending = WX_MAX_TXD;
243 	ring->rx_mini_max_pending = 0;
244 	ring->rx_jumbo_max_pending = 0;
245 	ring->rx_pending = wx->rx_ring_count;
246 	ring->tx_pending = wx->tx_ring_count;
247 	ring->rx_mini_pending = 0;
248 	ring->rx_jumbo_pending = 0;
249 }
250 EXPORT_SYMBOL(wx_get_ringparam);
251 
252 int wx_get_coalesce(struct net_device *netdev,
253 		    struct ethtool_coalesce *ec,
254 		    struct kernel_ethtool_coalesce *kernel_coal,
255 		    struct netlink_ext_ack *extack)
256 {
257 	struct wx *wx = netdev_priv(netdev);
258 
259 	ec->tx_max_coalesced_frames_irq = wx->tx_work_limit;
260 	/* only valid if in constant ITR mode */
261 	if (wx->rx_itr_setting <= 1)
262 		ec->rx_coalesce_usecs = wx->rx_itr_setting;
263 	else
264 		ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2;
265 
266 	/* if in mixed tx/rx queues per vector mode, report only rx settings */
267 	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
268 		return 0;
269 
270 	/* only valid if in constant ITR mode */
271 	if (wx->tx_itr_setting <= 1)
272 		ec->tx_coalesce_usecs = wx->tx_itr_setting;
273 	else
274 		ec->tx_coalesce_usecs = wx->tx_itr_setting >> 2;
275 
276 	return 0;
277 }
278 EXPORT_SYMBOL(wx_get_coalesce);
279 
/**
 * wx_set_coalesce - apply interrupt coalescing configuration
 * @netdev: network interface device structure
 * @ec: requested coalescing parameters
 * @kernel_coal: kernel coalesce structure (unused)
 * @extack: netlink extended ack (unused)
 *
 * Converts the requested usec values into ITR settings and programs
 * every queue vector's EITR register.
 *
 * Return: 0 on success, -EOPNOTSUPP if a tx-specific change is
 * requested while tx and rx share vectors, -EINVAL if a value exceeds
 * the hardware maximum.
 */
int wx_set_coalesce(struct net_device *netdev,
		    struct ethtool_coalesce *ec,
		    struct kernel_ethtool_coalesce *kernel_coal,
		    struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);
	u16 tx_itr_param, rx_itr_param;
	struct wx_q_vector *q_vector;
	u16 max_eitr;
	int i;

	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EOPNOTSUPP;
	}

	if (ec->tx_max_coalesced_frames_irq)
		wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;

	/* the EITR limit differs per MAC generation */
	if (wx->mac.type == wx_mac_sp)
		max_eitr = WX_SP_MAX_EITR;
	else
		max_eitr = WX_EM_MAX_EITR;

	/* itr settings store usecs << 2, so the largest acceptable
	 * usec value is max_eitr >> 2
	 */
	if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) ||
	    (ec->tx_coalesce_usecs > (max_eitr >> 2)))
		return -EINVAL;

	/* values 0 and 1 select a non-constant ITR mode and are stored
	 * unshifted; larger values are usecs stored as usecs << 2
	 */
	if (ec->rx_coalesce_usecs > 1)
		wx->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		wx->rx_itr_setting = ec->rx_coalesce_usecs;

	if (wx->rx_itr_setting == 1)
		rx_itr_param = WX_20K_ITR;
	else
		rx_itr_param = wx->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		wx->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		wx->tx_itr_setting = ec->tx_coalesce_usecs;

	if (wx->tx_itr_setting == 1) {
		if (wx->mac.type == wx_mac_sp)
			tx_itr_param = WX_12K_ITR;
		else
			tx_itr_param = WX_20K_ITR;
	} else {
		tx_itr_param = wx->tx_itr_setting;
	}

	/* mixed Rx/Tx */
	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
		wx->tx_itr_setting = wx->rx_itr_setting;

	/* program each vector: tx-only vectors take the tx value,
	 * rx-only and mixed vectors take the rx value
	 */
	for (i = 0; i < wx->num_q_vectors; i++) {
		q_vector = wx->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		wx_write_eitr(q_vector);
	}

	return 0;
}
EXPORT_SYMBOL(wx_set_coalesce);
351 
352 static unsigned int wx_max_channels(struct wx *wx)
353 {
354 	unsigned int max_combined;
355 
356 	if (!wx->msix_q_entries) {
357 		/* We only support one q_vector without MSI-X */
358 		max_combined = 1;
359 	} else {
360 		/* support up to max allowed queues with RSS */
361 		if (wx->mac.type == wx_mac_sp)
362 			max_combined = 63;
363 		else
364 			max_combined = 8;
365 	}
366 
367 	return max_combined;
368 }
369 
370 void wx_get_channels(struct net_device *dev,
371 		     struct ethtool_channels *ch)
372 {
373 	struct wx *wx = netdev_priv(dev);
374 
375 	/* report maximum channels */
376 	ch->max_combined = wx_max_channels(wx);
377 
378 	/* report info for other vector */
379 	if (wx->msix_q_entries) {
380 		ch->max_other = 1;
381 		ch->other_count = 1;
382 	}
383 
384 	/* record RSS queues */
385 	ch->combined_count = wx->ring_feature[RING_F_RSS].indices;
386 }
387 EXPORT_SYMBOL(wx_get_channels);
388 
389 int wx_set_channels(struct net_device *dev,
390 		    struct ethtool_channels *ch)
391 {
392 	unsigned int count = ch->combined_count;
393 	struct wx *wx = netdev_priv(dev);
394 
395 	/* verify other_count has not changed */
396 	if (ch->other_count != 1)
397 		return -EINVAL;
398 
399 	/* verify the number of channels does not exceed hardware limits */
400 	if (count > wx_max_channels(wx))
401 		return -EINVAL;
402 
403 	wx->ring_feature[RING_F_RSS].limit = count;
404 
405 	return 0;
406 }
407 EXPORT_SYMBOL(wx_set_channels);
408 
409 u32 wx_get_msglevel(struct net_device *netdev)
410 {
411 	struct wx *wx = netdev_priv(netdev);
412 
413 	return wx->msg_enable;
414 }
415 EXPORT_SYMBOL(wx_get_msglevel);
416 
417 void wx_set_msglevel(struct net_device *netdev, u32 data)
418 {
419 	struct wx *wx = netdev_priv(netdev);
420 
421 	wx->msg_enable = data;
422 }
423 EXPORT_SYMBOL(wx_set_msglevel);
424