/* xref: /linux/drivers/net/ethernet/sfc/ef100_netdev.c (revision f72c38fa) */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /****************************************************************************
3  * Driver for Solarflare network controllers and boards
4  * Copyright 2018 Solarflare Communications Inc.
5  * Copyright 2019-2020 Xilinx Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms of the GNU General Public License version 2 as published
9  * by the Free Software Foundation, incorporated herein by reference.
10  */
11 #include "net_driver.h"
12 #include "mcdi_port_common.h"
13 #include "mcdi_functions.h"
14 #include "efx_common.h"
15 #include "efx_channels.h"
16 #include "tx_common.h"
17 #include "ef100_netdev.h"
18 #include "ef100_ethtool.h"
19 #include "nic_common.h"
20 #include "ef100_nic.h"
21 #include "ef100_tx.h"
22 #include "ef100_regs.h"
23 #include "mcdi_filters.h"
24 #include "rx_common.h"
25 #include "ef100_sriov.h"
26 
27 static void ef100_update_name(struct efx_nic *efx)
28 {
29 	strcpy(efx->name, efx->net_dev->name);
30 }
31 
32 static int ef100_alloc_vis(struct efx_nic *efx, unsigned int *allocated_vis)
33 {
34 	/* EF100 uses a single TXQ per channel, as all checksum offloading
35 	 * is configured in the TX descriptor, and there is no TX Pacer for
36 	 * HIGHPRI queues.
37 	 */
38 	unsigned int tx_vis = efx->n_tx_channels + efx->n_extra_tx_channels;
39 	unsigned int rx_vis = efx->n_rx_channels;
40 	unsigned int min_vis, max_vis;
41 
42 	EFX_WARN_ON_PARANOID(efx->tx_queues_per_channel != 1);
43 
44 	tx_vis += efx->n_xdp_channels * efx->xdp_tx_per_channel;
45 
46 	max_vis = max(rx_vis, tx_vis);
47 	/* Currently don't handle resource starvation and only accept
48 	 * our maximum needs and no less.
49 	 */
50 	min_vis = max_vis;
51 
52 	return efx_mcdi_alloc_vis(efx, min_vis, max_vis,
53 				  NULL, allocated_vis);
54 }
55 
56 static int ef100_remap_bar(struct efx_nic *efx, int max_vis)
57 {
58 	unsigned int uc_mem_map_size;
59 	void __iomem *membase;
60 
61 	efx->max_vis = max_vis;
62 	uc_mem_map_size = PAGE_ALIGN(max_vis * efx->vi_stride);
63 
64 	/* Extend the original UC mapping of the memory BAR */
65 	membase = ioremap(efx->membase_phys, uc_mem_map_size);
66 	if (!membase) {
67 		netif_err(efx, probe, efx->net_dev,
68 			  "could not extend memory BAR to %x\n",
69 			  uc_mem_map_size);
70 		return -ENOMEM;
71 	}
72 	iounmap(efx->membase);
73 	efx->membase = membase;
74 	return 0;
75 }
76 
/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int ef100_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

	/* Tear down in (roughly) the reverse order of ef100_net_open().
	 * This function is also used as open's error-unwind path, so each
	 * step below is expected to tolerate its counterpart in open not
	 * having completed.
	 */
	netif_stop_queue(net_dev);
	efx_stop_all(efx);
	efx_mcdi_mac_fini_stats(efx);
	efx_disable_interrupts(efx);
	efx_clear_interrupt_affinity(efx);
	efx_nic_fini_interrupt(efx);
	efx_remove_filters(efx);
	efx_fini_napi(efx);
	efx_remove_channels(efx);
	efx_mcdi_free_vis(efx);
	efx_remove_interrupts(efx);

	efx->state = STATE_NET_DOWN;

	return 0;
}
104 
/* Context: process, rtnl_lock() held.
 * Bring the interface up: allocate VIs and channels, map the BAR, set up
 * filters/interrupts/stats and start the datapath.  On failure the
 * partially-initialised state is unwound via ef100_net_stop().
 */
static int ef100_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	unsigned int allocated_vis;
	int rc;

	ef100_update_name(efx);
	netif_dbg(efx, ifup, net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	/* Refuse to open if the device has been administratively disabled */
	rc = efx_check_disabled(efx);
	if (rc)
		goto fail;

	rc = efx_probe_interrupts(efx);
	if (rc)
		goto fail;

	rc = efx_set_channels(efx);
	if (rc)
		goto fail;

	/* Release any VIs left over from a previous open, then allocate
	 * exactly the number this channel configuration requires.
	 */
	rc = efx_mcdi_free_vis(efx);
	if (rc)
		goto fail;

	rc = ef100_alloc_vis(efx, &allocated_vis);
	if (rc)
		goto fail;

	rc = efx_probe_channels(efx);
	if (rc)
		return rc;
	/* NOTE(review): the direct return above bypasses the ef100_net_stop()
	 * unwind used by every other error path here, leaving the VIs and
	 * interrupts acquired above un-released - confirm this is intentional.
	 */

	/* The UC BAR mapping must cover every VI we were actually granted */
	rc = ef100_remap_bar(efx, allocated_vis);
	if (rc)
		goto fail;

	efx_init_napi(efx);

	rc = efx_probe_filters(efx);
	if (rc)
		goto fail;

	rc = efx_nic_init_interrupt(efx);
	if (rc)
		goto fail;
	efx_set_interrupt_affinity(efx);

	rc = efx_enable_interrupts(efx);
	if (rc)
		goto fail;

	/* in case the MC rebooted while we were stopped, consume the change
	 * to the warm reboot count
	 */
	(void) efx_mcdi_poll_reboot(efx);

	rc = efx_mcdi_mac_init_stats(efx);
	if (rc)
		goto fail;

	efx_start_all(efx);

	/* Link state detection is normally event-driven; we have
	 * to poll now because we could have missed a change
	 */
	mutex_lock(&efx->mac_lock);
	if (efx_mcdi_phy_poll(efx))
		efx_link_status_changed(efx);
	mutex_unlock(&efx->mac_lock);

	efx->state = STATE_NET_UP;

	return 0;

fail:
	/* ef100_net_stop() tolerates a partially-initialised device, so it
	 * serves as the unwind for all of the goto fail paths above.
	 */
	ef100_net_stop(net_dev);
	return rc;
}
186 
187 /* Initiate a packet transmission.  We use one channel per CPU
188  * (sharing when we have more CPUs than channels).
189  *
190  * Context: non-blocking.
191  * Note that returning anything other than NETDEV_TX_OK will cause the
192  * OS to free the skb.
193  */
194 static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb,
195 					 struct net_device *net_dev)
196 {
197 	struct efx_nic *efx = efx_netdev_priv(net_dev);
198 
199 	return __ef100_hard_start_xmit(skb, efx, net_dev, NULL);
200 }
201 
202 netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb,
203 				    struct efx_nic *efx,
204 				    struct net_device *net_dev,
205 				    struct efx_rep *efv)
206 {
207 	struct efx_tx_queue *tx_queue;
208 	struct efx_channel *channel;
209 	int rc;
210 
211 	channel = efx_get_tx_channel(efx, skb_get_queue_mapping(skb));
212 	netif_vdbg(efx, tx_queued, efx->net_dev,
213 		   "%s len %d data %d channel %d\n", __func__,
214 		   skb->len, skb->data_len, channel->channel);
215 	if (!efx->n_channels || !efx->n_tx_channels || !channel) {
216 		netif_stop_queue(net_dev);
217 		goto err;
218 	}
219 
220 	tx_queue = &channel->tx_queue[0];
221 	rc = __ef100_enqueue_skb(tx_queue, skb, efv);
222 	if (rc == 0)
223 		return NETDEV_TX_OK;
224 
225 err:
226 	net_dev->stats.tx_dropped++;
227 	return NETDEV_TX_OK;
228 }
229 
/* Netdev entry points for the EF100 port net device.  Open/stop/xmit are
 * EF100-specific; the remainder are the shared efx implementations.
 */
static const struct net_device_ops ef100_netdev_ops = {
	.ndo_open               = ef100_net_open,
	.ndo_stop               = ef100_net_stop,
	.ndo_start_xmit         = ef100_hard_start_xmit,
	.ndo_tx_timeout         = efx_watchdog,
	.ndo_get_stats64        = efx_net_stats,
	.ndo_change_mtu         = efx_change_mtu,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = efx_set_mac_address,
	.ndo_set_rx_mode        = efx_set_rx_mode, /* Lookout */
	.ndo_set_features       = efx_set_features,
	.ndo_get_phys_port_id   = efx_get_phys_port_id,
	.ndo_get_phys_port_name = efx_get_phys_port_name,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer      = efx_filter_rfs,
#endif
};
247 
248 /*	Netdev registration
249  */
250 int ef100_netdev_event(struct notifier_block *this,
251 		       unsigned long event, void *ptr)
252 {
253 	struct efx_nic *efx = container_of(this, struct efx_nic, netdev_notifier);
254 	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
255 
256 	if (efx->net_dev == net_dev &&
257 	    (event == NETDEV_CHANGENAME || event == NETDEV_REGISTER))
258 		ef100_update_name(efx);
259 
260 	return NOTIFY_DONE;
261 }
262 
/* Fill in the net_device ops, MTU limits and ethtool ops, then register
 * the device with the networking core.  Returns 0 on success or a
 * negative errno; on failure nothing is left registered.
 */
static int ef100_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &ef100_netdev_ops;
	net_dev->min_mtu = EFX_MIN_MTU;
	net_dev->max_mtu = EFX_MAX_MTU;
	net_dev->ethtool_ops = &ef100_ethtool_ops;

	rtnl_lock();

	/* Resolve any "eth%d"-style name template before the name is used */
	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	ef100_update_name(efx);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	efx->state = STATE_NET_DOWN;
	rtnl_unlock();
	/* MCDI logging attaches sysfs attributes; done outside the lock */
	efx_init_mcdi_logging(efx);

	return 0;

fail_locked:
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;
}
300 
301 static void ef100_unregister_netdev(struct efx_nic *efx)
302 {
303 	if (efx_dev_registered(efx)) {
304 		efx_fini_mcdi_logging(efx);
305 		efx->state = STATE_PROBED;
306 		unregister_netdev(efx->net_dev);
307 	}
308 }
309 
/* Tear down everything ef100_probe_netdev() set up, in reverse order.
 * Safe to call when probe bailed out early or found no active port
 * (in which case there is no netdev and nothing to do).
 */
void ef100_remove_netdev(struct efx_probe_data *probe_data)
{
	struct efx_nic *efx = &probe_data->efx;

	/* Probe may have exited before allocating a netdev */
	if (!efx->net_dev)
		return;

	rtnl_lock();
	dev_close(efx->net_dev);
	rtnl_unlock();

	unregister_netdevice_notifier(&efx->netdev_notifier);
#if defined(CONFIG_SFC_SRIOV)
	if (!efx->type->is_vf)
		efx_ef100_pci_sriov_disable(efx, true);
#endif

	ef100_unregister_netdev(efx);

	/* filter_sem guards the filter table against concurrent users */
	down_write(&efx->filter_sem);
	efx_mcdi_filter_table_remove(efx);
	up_write(&efx->filter_sem);
	efx_fini_channels(efx);
	kfree(efx->phy_data);
	efx->phy_data = NULL;

	free_netdev(efx->net_dev);
	efx->net_dev = NULL;
	efx->state = STATE_PROBED;
}
340 
/* Allocate, configure and register the net device for this PCI function.
 * Returns 0 on success (including the no-active-port case, where no
 * netdev is created) or a negative errno.
 */
int ef100_probe_netdev(struct efx_probe_data *probe_data)
{
	struct efx_nic *efx = &probe_data->efx;
	struct efx_probe_data **probe_ptr;
	struct net_device *net_dev;
	int rc;

	/* Some PCI functions carry no network port; nothing to probe */
	if (efx->mcdi->fn_flags &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
		pci_info(efx->pci_dev, "No network port on this PCI function");
		return 0;
	}

	/* Allocate and initialise a struct net_device */
	net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
	if (!net_dev)
		return -ENOMEM;
	/* The netdev priv area holds a single pointer back to probe_data
	 * (hence sizeof(probe_data), the pointer, above - not the struct).
	 */
	probe_ptr = netdev_priv(net_dev);
	*probe_ptr = probe_data;
	efx->net_dev = net_dev;
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);

	net_dev->features |= efx->type->offload_features;
	net_dev->hw_features |= efx->type->offload_features;
	net_dev->hw_enc_features |= efx->type->offload_features;
	net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
				  NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
	netif_set_tso_max_segs(net_dev,
			       ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
	efx->mdio.dev = net_dev;

	rc = efx_ef100_init_datapath_caps(efx);
	if (rc < 0)
		goto fail;

	rc = ef100_phy_probe(efx);
	if (rc)
		goto fail;

	rc = efx_init_channels(efx);
	if (rc)
		goto fail;

	/* Hold filter_sem for write while (re)building the filter table */
	down_write(&efx->filter_sem);
	rc = ef100_filter_table_probe(efx);
	up_write(&efx->filter_sem);
	if (rc)
		goto fail;

	netdev_rss_key_fill(efx->rss_context.rx_hash_key,
			    sizeof(efx->rss_context.rx_hash_key));

	/* Don't fail init if RSS setup doesn't work. */
	efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);

	rc = ef100_register_netdev(efx);
	if (rc)
		goto fail;

	if (!efx->type->is_vf) {
		rc = ef100_probe_netdev_pf(efx);
		if (rc)
			goto fail;
	}

	efx->netdev_notifier.notifier_call = ef100_netdev_event;
	rc = register_netdevice_notifier(&efx->netdev_notifier);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Failed to register netdevice notifier, rc=%d\n", rc);
		goto fail;
	}

	/* Success deliberately falls through here with rc == 0.
	 * NOTE(review): the fail label does no unwinding - partially-probed
	 * state is presumably left for ef100_remove_netdev() to clean up;
	 * confirm every caller invokes it on error.
	 */
fail:
	return rc;
}
417