/*
 * Copyright 2013 Cisco Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/net_tstamp.h>

#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_clsf.h"
#include "vnic_rss.h"
#include "vnic_stats.h"

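/* An ethtool statistic is described by its display name and by the u64
 * slot it occupies in the corresponding vnic_{tx,rx,gen}_stats structure.
 * The ENIC_*_STAT() macros below derive that slot index with offsetof().
 */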
struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};

#define ENIC_TX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
}

#define ENIC_RX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
}

#define ENIC_GEN_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64) \
}

static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
	ENIC_TX_STAT(tx_tso),
};

static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

static const struct enic_stat enic_gen_stats[] = {
	ENIC_GEN_STAT(dma_map_error),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);

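/* Set the interrupt coalescing timer on the interrupt of every RQ. */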
static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
{
	int i;
	int intr;

	for (i = 0; i < enic->rq_count; i++) {
		intr = enic_msix_rq_intr(enic, i);
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
	}
}

static int enic_get_ksettings(struct net_device *netdev,
			      struct ethtool_link_ksettings *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	struct ethtool_link_settings *base = &ecmd->base;

	ethtool_link_ksettings_add_link_mode(ecmd, supported,
					     10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising,
					     10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
	base->port = PORT_FIBRE;

	if (netif_carrier_ok(netdev)) {
		base->speed = vnic_dev_port_speed(enic->vdev);
		base->duplex = DUPLEX_FULL;
	} else {
		base->speed = SPEED_UNKNOWN;
		base->duplex = DUPLEX_UNKNOWN;
	}

	base->autoneg = AUTONEG_DISABLE;

	return 0;
}

static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;
	int err;

	err = enic_dev_fw_info(enic, &fw_info);
	/* return only when pci_zalloc_consistent fails in vnic_dev_fw_info
	 * For other failures, like devcmd failure, we return previously
	 * recorded info.
	 */
	if (err == -ENOMEM)
		return;

	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}

static void enic_get_strings(struct net_device *netdev, u32 stringset,
	u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_gen_stats; i++) {
			memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static void enic_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_enet_config *c = &enic->config;

	ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
	ring->rx_pending = c->rq_desc_count;
	ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
	ring->tx_pending = c->wq_desc_count;
}

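/* Resize the RX/TX descriptor rings.  The request is validated, the
 * interface is brought down if it is running, the ring sizes are rounded
 * down to a multiple of 32 and the vNIC resources are reallocated before
 * the interface is reopened.  On failure the previous sizes are restored.
 */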
static int enic_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_enet_config *c = &enic->config;
	int running = netif_running(netdev);
	unsigned int rx_pending;
	unsigned int tx_pending;
	int err = 0;

	if (ring->rx_mini_max_pending || ring->rx_mini_pending) {
		netdev_info(netdev,
			    "modifying mini ring params is not supported");
		return -EINVAL;
	}
	if (ring->rx_jumbo_max_pending || ring->rx_jumbo_pending) {
		netdev_info(netdev,
			    "modifying jumbo ring params is not supported");
		return -EINVAL;
	}
	rx_pending = c->rq_desc_count;
	tx_pending = c->wq_desc_count;
	if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
	    ring->rx_pending < ENIC_MIN_RQ_DESCS) {
		netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
			    ring->rx_pending, ENIC_MIN_RQ_DESCS,
			    ENIC_MAX_RQ_DESCS);
		return -EINVAL;
	}
	if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
	    ring->tx_pending < ENIC_MIN_WQ_DESCS) {
		netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
			    ring->tx_pending, ENIC_MIN_WQ_DESCS,
			    ENIC_MAX_WQ_DESCS);
		return -EINVAL;
	}
	if (running)
		dev_close(netdev);
	c->rq_desc_count =
		ring->rx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
	c->wq_desc_count =
		ring->tx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
	enic_free_vnic_resources(enic);
	err = enic_alloc_vnic_resources(enic);
	if (err) {
		netdev_err(netdev,
			   "Failed to alloc vNIC resources, aborting\n");
		enic_free_vnic_resources(enic);
		goto err_out;
	}
	enic_init_vnic_resources(enic);
	if (running) {
		err = dev_open(netdev, NULL);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	c->rq_desc_count = rx_pending;
	c->wq_desc_count = tx_pending;
	return err;
}

static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
	default:
		return -EOPNOTSUPP;
	}
}

static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;
	int err;

	err = enic_dev_stats_dump(enic, &vstats);
	/* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
	 * For other failures, like devcmd failure, we return previously
	 * recorded stats.
	 */
	if (err == -ENOMEM)
		return;

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
	for (i = 0; i < enic_n_gen_stats; i++)
		*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
}

static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}

static int enic_get_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
	if (rxcoal->use_adaptive_rx_coalesce)
		ecmd->use_adaptive_rx_coalesce = 1;
	ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
	ecmd->rx_coalesce_usecs_high = rxcoal->range_end;

	return 0;
}

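/* Validate a coalescing request: TX coalescing is configurable only in
 * MSI-X mode, values above the adapter limit are merely clamped (with a
 * notice), and when an adaptive high bound is given it must exceed the
 * low bound by at least ENIC_AIC_LARGE_PKT_DIFF.
 */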
static int enic_coalesce_valid(struct enic *enic,
			       struct ethtool_coalesce *ec)
{
	u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
					   ec->rx_coalesce_usecs_high);
	u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
					  ec->rx_coalesce_usecs_low);

	if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
	    ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs_low > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
		netdev_info(enic->netdev, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. Setting max value.\n",
			    coalesce_usecs_max);

	if (ec->rx_coalesce_usecs_high &&
	    (rx_coalesce_usecs_high <
	     rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
		return -EINVAL;

	return 0;
}

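/* Apply coalescing settings: clamp the requested values to the adapter
 * maximum, program the WQ interrupt timers (MSI-X only), switch adaptive
 * RX coalescing on or off, and record the adaptive range when a high
 * bound is supplied.
 */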
static int enic_set_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
	u32 rx_coalesce_usecs_low;
	u32 rx_coalesce_usecs_high;
	u32 coalesce_usecs_max;
	unsigned int i, intr;
	int ret;
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	ret = enic_coalesce_valid(enic, ecmd);
	if (ret)
		return ret;
	coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
				  coalesce_usecs_max);
	rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
				  coalesce_usecs_max);

	rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
				      coalesce_usecs_max);
	rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
				       coalesce_usecs_max);

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
						       tx_coalesce_usecs);
		}
		enic->tx_coalesce_usecs = tx_coalesce_usecs;
	}
	rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
	if (!rxcoal->use_adaptive_rx_coalesce)
		enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
	if (ecmd->rx_coalesce_usecs_high) {
		rxcoal->range_end = rx_coalesce_usecs_high;
		rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
		rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
						ENIC_AIC_LARGE_PKT_DIFF;
	}

	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}

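/* Report all installed RFS filter IDs in @rule_locs, walking every bucket
 * of the filter hash table.  Returns -EMSGSIZE if the caller's array is
 * too small.  Called with rfs_h.lock held.
 */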
static int enic_grxclsrlall(struct enic *enic, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int j, ret = 0, cnt = 0;

	cmd->data = enic->rfs_h.max - enic->rfs_h.free;
	for (j = 0; j < (1 << ENIC_RFS_FLW_BITSHIFT); j++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[j];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			if (cnt == cmd->rule_cnt)
				return -EMSGSIZE;
			rule_locs[cnt] = n->fltr_id;
			cnt++;
		}
	}
	cmd->rule_cnt = cnt;

	return ret;
}

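/* Report one installed filter as an IPv4 TCP/UDP 4-tuple rule (all fields
 * fully masked) along with the RQ it steers to.  Called with rfs_h.lock
 * held.
 */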
static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
				(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct enic_rfs_fltr_node *n;

	n = htbl_fltr_search(enic, (u16)fsp->location);
	if (!n)
		return -EINVAL;
	switch (n->keys.basic.ip_proto) {
	case IPPROTO_TCP:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IPPROTO_UDP:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
	fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;

	fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
	fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;

	fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
	fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;

	fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
	fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;

	fsp->ring_cookie = n->rq_id;

	return 0;
}

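/* Report which header fields feed the RSS hash for the given flow type.
 * TCP always hashes on the full 4-tuple, UDP includes the ports only when
 * the adapter advertises UDP RSS support, and all other IP flow types
 * hash on the IP addresses alone.
 */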
static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd)
{
	u8 rss_hash_type = 0;
	cmd->data = 0;

	spin_lock_bh(&enic->devcmd_lock);
	(void)vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type);
	spin_unlock_bh(&enic->devcmd_lock);
	switch (cmd->flow_type) {
	case TCP_V6_FLOW:
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 |
			     RXH_IP_SRC | RXH_IP_DST;
		break;
	case UDP_V6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

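/* Dispatch ETHTOOL_GRX* queries; filter-table lookups run under
 * rfs_h.lock.
 */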
static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = enic->rq_count;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		spin_lock_bh(&enic->rfs_h.lock);
		cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free;
		cmd->data = enic->rfs_h.max;
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	case ETHTOOL_GRXCLSRLALL:
		spin_lock_bh(&enic->rfs_h.lock);
		ret = enic_grxclsrlall(enic, cmd, rule_locs);
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	case ETHTOOL_GRXCLSRULE:
		spin_lock_bh(&enic->rfs_h.lock);
		ret = enic_grxclsrule(enic, cmd);
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	case ETHTOOL_GRXFH:
		ret = enic_get_rx_flow_hash(enic, cmd);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int enic_get_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna, void *data)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = enic->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int enic_set_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna,
			    const void *data)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		enic->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static u32 enic_get_rxfh_key_size(struct net_device *netdev)
{
	return ENIC_RSS_LEN;
}

static int enic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
			 u8 *hfunc)
{
	struct enic *enic = netdev_priv(netdev);

	if (hkey)
		memcpy(hkey, enic->rss_key, ENIC_RSS_LEN);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	return 0;
}

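/* Only the Toeplitz hash function is supported and the indirection table
 * cannot be changed; a new hash key is pushed to the adapter via
 * __enic_set_rsskey().
 */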
static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
			 const u8 *hkey, const u8 hfunc)
{
	struct enic *enic = netdev_priv(netdev);

	if ((hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) ||
	    indir)
		return -EINVAL;

	if (hkey)
		memcpy(enic->rss_key, hkey, ENIC_RSS_LEN);

	return __enic_set_rsskey(enic);
}

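/* Only software timestamping is reported. */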
static int enic_get_ts_info(struct net_device *netdev,
			    struct ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	return 0;
}

static const struct ethtool_ops enic_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
				     ETHTOOL_COALESCE_RX_USECS_LOW |
				     ETHTOOL_COALESCE_RX_USECS_HIGH,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_ringparam = enic_get_ringparam,
	.set_ringparam = enic_set_ringparam,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
	.get_rxnfc = enic_get_rxnfc,
	.get_tunable = enic_get_tunable,
	.set_tunable = enic_set_tunable,
	.get_rxfh_key_size = enic_get_rxfh_key_size,
	.get_rxfh = enic_get_rxfh,
	.set_rxfh = enic_set_rxfh,
	.get_link_ksettings = enic_get_ksettings,
	.get_ts_info = enic_get_ts_info,
};

void enic_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &enic_ethtool_ops;
}