1 /*-
2 * Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates
3 * All rights reserved.
4 *
5 * Developed by Semihalf.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/bus.h>
32 #include <sys/kernel.h>
33 #include <sys/kthread.h>
34 #include <sys/lock.h>
35 #include <sys/mbuf.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/rman.h>
39 #include <sys/socket.h>
40 #include <sys/sockio.h>
41 #include <sys/sysctl.h>
42 #include <sys/taskqueue.h>
43
44 #include <machine/atomic.h>
45
46 #include "opt_inet.h"
47 #include "opt_inet6.h"
48
49 #include <net/ethernet.h>
50 #include <net/if.h>
51 #include <net/if_var.h>
52 #include <net/if_arp.h>
53 #include <net/if_dl.h>
54 #include <net/if_media.h>
55 #include <net/if_types.h>
56 #include <netinet/in.h>
57 #include <net/if_vlan_var.h>
58 #include <netinet/tcp.h>
59 #include <netinet/tcp_lro.h>
60
61 #ifdef INET
62 #include <netinet/in.h>
63 #include <netinet/in_systm.h>
64 #include <netinet/in_var.h>
65 #include <netinet/ip.h>
66 #endif
67
68 #ifdef INET6
69 #include <netinet/ip6.h>
70 #endif
71
72 #include <sys/sockio.h>
73
74 #include <dev/pci/pcireg.h>
75 #include <dev/pci/pcivar.h>
76
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79
80 #include <al_hal_common.h>
81 #include <al_hal_plat_services.h>
82 #include <al_hal_udma_config.h>
83 #include <al_hal_udma_iofic.h>
84 #include <al_hal_udma_debug.h>
85 #include <al_hal_eth.h>
86
87 #include "al_eth.h"
88 #include "al_init_eth_lm.h"
89 #include "arm/annapurna/alpine/alpine_serdes.h"
90
91 #include "miibus_if.h"
92
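/*
 * Debug printf: emits output only when AL_DBG_LEVEL is at least
 * AL_DBG_LEVEL_DBG and serializes it with AL_DBG_LOCK()/AL_DBG_UNLOCK().
 */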
93 #define device_printf_dbg(fmt, ...) do { \
94 if (AL_DBG_LEVEL >= AL_DBG_LEVEL_DBG) { AL_DBG_LOCK(); \
95 device_printf(fmt, __VA_ARGS__); AL_DBG_UNLOCK();} \
96 } while (0)
97
98 MALLOC_DEFINE(M_IFAL, "if_al_malloc", "All allocated data for AL ETH driver");
99
100 /* TODO: move out to a common PCI header file */
101 #define PCI_VENDOR_ID_ANNAPURNA_LABS 0x1c36
102 #define PCI_DEVICE_ID_AL_ETH 0x0001
103 #define PCI_DEVICE_ID_AL_ETH_ADVANCED 0x0002
104 #define PCI_DEVICE_ID_AL_ETH_NIC 0x0003
105 #define PCI_DEVICE_ID_AL_ETH_FPGA_NIC 0x0030
106 #define PCI_DEVICE_ID_AL_CRYPTO 0x0011
107 #define PCI_DEVICE_ID_AL_CRYPTO_VF 0x8011
108 #define PCI_DEVICE_ID_AL_RAID_DMA 0x0021
109 #define PCI_DEVICE_ID_AL_RAID_DMA_VF 0x8021
110 #define PCI_DEVICE_ID_AL_USB 0x0041
111
112 #define MAC_ADDR_STR "%02x:%02x:%02x:%02x:%02x:%02x"
113 #define MAC_ADDR(addr) addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]
114
115 #define AL_ETH_MAC_TABLE_UNICAST_IDX_BASE 0
116 #define AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT 4
117 #define AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + \
118 AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)
119
120 #define AL_ETH_MAC_TABLE_DROP_IDX (AL_ETH_FWD_MAC_NUM - 1)
121 #define AL_ETH_MAC_TABLE_BROADCAST_IDX (AL_ETH_MAC_TABLE_DROP_IDX - 1)
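/*
 * Resulting layout of the RX forwarding MAC table: unicast entries occupy
 * indices 0..3, the all-multicast entry follows at index 4, and the last two
 * entries of the table are reserved for the broadcast entry and the
 * drop/promiscuous entry, respectively.
 */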
122
123 #define AL_ETH_THASH_UDMA_SHIFT 0
124 #define AL_ETH_THASH_UDMA_MASK (0xF << AL_ETH_THASH_UDMA_SHIFT)
125
126 #define AL_ETH_THASH_Q_SHIFT 4
127 #define AL_ETH_THASH_Q_MASK (0x3 << AL_ETH_THASH_Q_SHIFT)
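/*
 * A thash (RX flow hash) table entry packs the target UDMA number in bits
 * [3:0] and the target queue in bits [5:4]; see
 * al_eth_set_thash_table_entry() below.
 */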
128
129 /* the following defines should be moved to hal */
130 #define AL_ETH_FSM_ENTRY_IPV4_TCP 0
131 #define AL_ETH_FSM_ENTRY_IPV4_UDP 1
132 #define AL_ETH_FSM_ENTRY_IPV6_TCP 2
133 #define AL_ETH_FSM_ENTRY_IPV6_UDP 3
134 #define AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP 4
135 #define AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP 5
136
137 /* FSM DATA format */
138 #define AL_ETH_FSM_DATA_OUTER_2_TUPLE 0
139 #define AL_ETH_FSM_DATA_OUTER_4_TUPLE 1
140 #define AL_ETH_FSM_DATA_INNER_2_TUPLE 2
141 #define AL_ETH_FSM_DATA_INNER_4_TUPLE 3
142
143 #define AL_ETH_FSM_DATA_HASH_SEL (1 << 2)
144
145 #define AL_ETH_FSM_DATA_DEFAULT_Q 0
146 #define AL_ETH_FSM_DATA_DEFAULT_UDMA 0
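/*
 * An RX FSM table entry selects how a parsed packet type is steered: the low
 * bits pick the outer/inner 2- or 4-tuple used for hashing,
 * AL_ETH_FSM_DATA_HASH_SEL (bit 2) enables hash-based queue selection, and
 * without it the default queue/UDMA values are used.  See
 * al_eth_fsm_table_init() below.
 */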
147
148 #define AL_BR_SIZE 512
149 #define AL_TSO_SIZE 65500
150 #define AL_DEFAULT_MTU 1500
151
152 #define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
153
154 #define AL_IP_ALIGNMENT_OFFSET 2
155
156 #define SFP_I2C_ADDR 0x50
157
158 #define AL_MASK_GROUP_A_INT 0x7
159 #define AL_MASK_GROUP_B_INT 0xF
160 #define AL_MASK_GROUP_C_INT 0xF
161 #define AL_MASK_GROUP_D_INT 0xFFFFFFFF
162
163 #define AL_REG_OFFSET_FORWARD_INTR (0x1800000 + 0x1210)
164 #define AL_EN_FORWARD_INTR 0x1FFFF
165 #define AL_DIS_FORWARD_INTR 0
166
167 #define AL_M2S_MASK_INIT 0x480
168 #define AL_S2M_MASK_INIT 0x1E0
169 #define AL_M2S_S2M_MASK_NOT_INT (0x3f << 25)
170
171 #define AL_10BASE_T_SPEED 10
172 #define AL_100BASE_TX_SPEED 100
173 #define AL_1000BASE_T_SPEED 1000
174
175 #define AL_RX_LOCK_INIT(_sc) mtx_init(&((_sc)->if_rx_lock), "ALRXL", "ALRXL", MTX_DEF)
176 #define AL_RX_LOCK(_sc) mtx_lock(&((_sc)->if_rx_lock))
177 #define AL_RX_UNLOCK(_sc) mtx_unlock(&((_sc)->if_rx_lock))
178
179 /* helper functions */
180 static int al_is_device_supported(device_t);
181
182 static void al_eth_init_rings(struct al_eth_adapter *);
183 static void al_eth_flow_ctrl_disable(struct al_eth_adapter *);
184 int al_eth_fpga_read_pci_config(void *, int, uint32_t *);
185 int al_eth_fpga_write_pci_config(void *, int, uint32_t);
186 int al_eth_read_pci_config(void *, int, uint32_t *);
187 int al_eth_write_pci_config(void *, int, uint32_t);
188 void al_eth_irq_config(uint32_t *, uint32_t);
189 void al_eth_forward_int_config(uint32_t *, uint32_t);
190 static void al_eth_start_xmit(void *, int);
191 static void al_eth_rx_recv_work(void *, int);
192 static int al_eth_up(struct al_eth_adapter *);
193 static void al_eth_down(struct al_eth_adapter *);
194 static void al_eth_interrupts_unmask(struct al_eth_adapter *);
195 static void al_eth_interrupts_mask(struct al_eth_adapter *);
196 static int al_eth_check_mtu(struct al_eth_adapter *, int);
197 static uint64_t al_get_counter(if_t, ift_counter);
198 static void al_eth_req_rx_buff_size(struct al_eth_adapter *, int);
199 static int al_eth_board_params_init(struct al_eth_adapter *);
200 static int al_media_update(if_t);
201 static void al_media_status(if_t, struct ifmediareq *);
202 static int al_eth_function_reset(struct al_eth_adapter *);
203 static int al_eth_hw_init_adapter(struct al_eth_adapter *);
204 static void al_eth_serdes_init(struct al_eth_adapter *);
205 static void al_eth_lm_config(struct al_eth_adapter *);
206 static int al_eth_hw_init(struct al_eth_adapter *);
207
208 static void al_tick_stats(void *);
209
210 /* ifnet entry points */
211 static void al_init(void *);
212 static int al_mq_start(if_t, struct mbuf *);
213 static void al_qflush(if_t);
214 static int al_ioctl(if_t ifp, u_long, caddr_t);
215
216 /* bus entry points */
217 static int al_probe(device_t);
218 static int al_attach(device_t);
219 static int al_detach(device_t);
220 static int al_shutdown(device_t);
221
222 /* mii bus support routines */
223 static int al_miibus_readreg(device_t, int, int);
224 static int al_miibus_writereg(device_t, int, int, int);
225 static void al_miibus_statchg(device_t);
226 static void al_miibus_linkchg(device_t);
227
228 struct al_eth_adapter* g_adapters[16];
229 uint32_t g_adapters_count;
230
231 /* flag for napi-like mbuf processing, controlled from sysctl */
232 static int napi = 0;
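/*
 * With napi != 0, the interrupt filters skip re-scheduling a taskqueue run
 * while the corresponding handler is already running (tracked by the
 * *_is_running flags); with napi == 0 every interrupt enqueues the task.
 */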
233
234 static device_method_t al_methods[] = {
235 /* Device interface */
236 DEVMETHOD(device_probe, al_probe),
237 DEVMETHOD(device_attach, al_attach),
238 DEVMETHOD(device_detach, al_detach),
239 DEVMETHOD(device_shutdown, al_shutdown),
240
241 DEVMETHOD(miibus_readreg, al_miibus_readreg),
242 DEVMETHOD(miibus_writereg, al_miibus_writereg),
243 DEVMETHOD(miibus_statchg, al_miibus_statchg),
244 DEVMETHOD(miibus_linkchg, al_miibus_linkchg),
245 { 0, 0 }
246 };
247
248 static driver_t al_driver = {
249 "al",
250 al_methods,
251 sizeof(struct al_eth_adapter),
252 };
253
254 DRIVER_MODULE(al, pci, al_driver, 0, 0);
255 DRIVER_MODULE(miibus, al, miibus_driver, 0, 0);
256
257 static int
258 al_probe(device_t dev)
259 {
260 if ((al_is_device_supported(dev)) != 0) {
261 device_set_desc(dev, "al");
262 return (BUS_PROBE_DEFAULT);
263 }
264 return (ENXIO);
265 }
266
267 static int
268 al_attach(device_t dev)
269 {
270 struct al_eth_adapter *adapter;
271 struct sysctl_oid_list *child;
272 struct sysctl_ctx_list *ctx;
273 struct sysctl_oid *tree;
274 if_t ifp;
275 uint32_t dev_id;
276 uint32_t rev_id;
277 int bar_udma;
278 int bar_mac;
279 int bar_ec;
280 int err;
281
282 err = 0;
283 ifp = NULL;
284 dev_id = rev_id = 0;
285 ctx = device_get_sysctl_ctx(dev);
286 tree = SYSCTL_PARENT(device_get_sysctl_tree(dev));
287 child = SYSCTL_CHILDREN(tree);
288
289 if (g_adapters_count == 0) {
290 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "napi",
291 CTLFLAG_RW, &napi, 0, "Use pseudo-napi mechanism");
292 }
293 adapter = device_get_softc(dev);
294 adapter->dev = dev;
295 adapter->board_type = ALPINE_INTEGRATED;
296 snprintf(adapter->name, AL_ETH_NAME_MAX_LEN, "%s",
297 device_get_nameunit(dev));
298 AL_RX_LOCK_INIT(adapter);
299
300 g_adapters[g_adapters_count] = adapter;
301
302 bar_udma = PCIR_BAR(AL_ETH_UDMA_BAR);
303 adapter->udma_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
304 &bar_udma, RF_ACTIVE);
305 if (adapter->udma_res == NULL) {
306 device_printf(adapter->dev,
307 "could not allocate memory resources for DMA.\n");
308 err = ENOMEM;
309 goto err_res_dma;
310 }
311 adapter->udma_base = al_bus_dma_to_va(rman_get_bustag(adapter->udma_res),
312 rman_get_bushandle(adapter->udma_res));
313 bar_mac = PCIR_BAR(AL_ETH_MAC_BAR);
314 adapter->mac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
315 &bar_mac, RF_ACTIVE);
316 if (adapter->mac_res == NULL) {
317 device_printf(adapter->dev,
318 "could not allocate memory resources for MAC.\n");
319 err = ENOMEM;
320 goto err_res_mac;
321 }
322 adapter->mac_base = al_bus_dma_to_va(rman_get_bustag(adapter->mac_res),
323 rman_get_bushandle(adapter->mac_res));
324
325 bar_ec = PCIR_BAR(AL_ETH_EC_BAR);
326 adapter->ec_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar_ec,
327 RF_ACTIVE);
328 if (adapter->ec_res == NULL) {
329 device_printf(adapter->dev,
330 "could not allocate memory resources for EC.\n");
331 err = ENOMEM;
332 goto err_res_ec;
333 }
334 adapter->ec_base = al_bus_dma_to_va(rman_get_bustag(adapter->ec_res),
335 rman_get_bushandle(adapter->ec_res));
336
337 adapter->netdev = ifp = if_alloc(IFT_ETHER);
338
339 if_setsoftc(ifp, adapter);
340 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
341 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
342 if_setflags(ifp, if_getdrvflags(ifp));
343 if_setflagbits(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALLMULTI, 0);
344 if_settransmitfn(ifp, al_mq_start);
345 if_setqflushfn(ifp, al_qflush);
346 if_setioctlfn(ifp, al_ioctl);
347 if_setinitfn(ifp, al_init);
348 if_setgetcounterfn(ifp, al_get_counter);
349 if_setmtu(ifp, AL_DEFAULT_MTU);
350
351 adapter->if_flags = if_getflags(ifp);
352
353 if_setcapabilities(ifp, if_getcapenable(ifp) );
354
355 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM |
356 IFCAP_HWCSUM_IPV6 | IFCAP_TSO |
357 IFCAP_LRO | IFCAP_JUMBO_MTU, 0);
358
359 if_setcapenable(ifp, if_getcapabilities(ifp));
360
361 adapter->id_number = g_adapters_count;
362
363 if (adapter->board_type == ALPINE_INTEGRATED) {
364 dev_id = pci_get_device(adapter->dev);
365 rev_id = pci_get_revid(adapter->dev);
366 } else {
367 al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
368 PCIR_DEVICE, &dev_id);
369 al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
370 PCIR_REVID, &rev_id);
371 }
372
373 adapter->dev_id = dev_id;
374 adapter->rev_id = rev_id;
375
376 /* set default ring sizes */
377 adapter->tx_ring_count = AL_ETH_DEFAULT_TX_SW_DESCS;
378 adapter->tx_descs_count = AL_ETH_DEFAULT_TX_HW_DESCS;
379 adapter->rx_ring_count = AL_ETH_DEFAULT_RX_DESCS;
380 adapter->rx_descs_count = AL_ETH_DEFAULT_RX_DESCS;
381
382 adapter->num_tx_queues = AL_ETH_NUM_QUEUES;
383 adapter->num_rx_queues = AL_ETH_NUM_QUEUES;
384
385 adapter->small_copy_len = AL_ETH_DEFAULT_SMALL_PACKET_LEN;
386 adapter->link_poll_interval = AL_ETH_DEFAULT_LINK_POLL_INTERVAL;
387 adapter->max_rx_buff_alloc_size = AL_ETH_DEFAULT_MAX_RX_BUFF_ALLOC_SIZE;
388
389 al_eth_req_rx_buff_size(adapter, if_getmtu(adapter->netdev));
390
391 adapter->link_config.force_1000_base_x = AL_ETH_DEFAULT_FORCE_1000_BASEX;
392
393 err = al_eth_board_params_init(adapter);
394 if (err != 0)
395 goto err;
396
397 if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial) {
398 ifmedia_init(&adapter->media, IFM_IMASK,
399 al_media_update, al_media_status);
400 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
401 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
402 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
403 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
404 }
405
406 al_eth_function_reset(adapter);
407
408 err = al_eth_hw_init_adapter(adapter);
409 if (err != 0)
410 goto err;
411
412 al_eth_init_rings(adapter);
413 g_adapters_count++;
414
415 al_eth_lm_config(adapter);
416 mtx_init(&adapter->stats_mtx, "AlStatsMtx", NULL, MTX_DEF);
417 mtx_init(&adapter->wd_mtx, "AlWdMtx", NULL, MTX_DEF);
418 callout_init_mtx(&adapter->stats_callout, &adapter->stats_mtx, 0);
419 callout_init_mtx(&adapter->wd_callout, &adapter->wd_mtx, 0);
420
421 ether_ifattach(ifp, adapter->mac_addr);
422 if_setmtu(ifp, AL_DEFAULT_MTU);
423
424 if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
425 al_eth_hw_init(adapter);
426
427 /* Attach PHY(s) */
428 err = mii_attach(adapter->dev, &adapter->miibus, adapter->netdev,
429 al_media_update, al_media_status, BMSR_DEFCAPMASK, 0,
430 MII_OFFSET_ANY, 0);
431 if (err != 0) {
432 device_printf(adapter->dev, "attaching PHYs failed\n");
433 return (err);
434 }
435
436 adapter->mii = device_get_softc(adapter->miibus);
437 }
438
439 return (err);
440
441 err:
442 bus_release_resource(dev, SYS_RES_MEMORY, bar_ec, adapter->ec_res);
443 err_res_ec:
444 bus_release_resource(dev, SYS_RES_MEMORY, bar_mac, adapter->mac_res);
445 err_res_mac:
446 bus_release_resource(dev, SYS_RES_MEMORY, bar_udma, adapter->udma_res);
447 err_res_dma:
448 return (err);
449 }
450
451 static int
452 al_detach(device_t dev)
453 {
454 struct al_eth_adapter *adapter;
455
456 adapter = device_get_softc(dev);
457 ether_ifdetach(adapter->netdev);
458
459 mtx_destroy(&adapter->stats_mtx);
460 mtx_destroy(&adapter->wd_mtx);
461
462 al_eth_down(adapter);
463
464 bus_release_resource(dev, SYS_RES_IRQ, 0, adapter->irq_res);
465 bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->ec_res);
466 bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->mac_res);
467 bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->udma_res);
468
469 return (0);
470 }
471
472 int
473 al_eth_fpga_read_pci_config(void *handle, int where, uint32_t *val)
474 {
475
476 /* handle is the base address of the adapter */
477 *val = al_reg_read32((void*)((u_long)handle + where));
478
479 return (0);
480 }
481
482 int
483 al_eth_fpga_write_pci_config(void *handle, int where, uint32_t val)
484 {
485
486 /* handle is the base address of the adapter */
487 al_reg_write32((void*)((u_long)handle + where), val);
488 return (0);
489 }
490
491 int
492 al_eth_read_pci_config(void *handle, int where, uint32_t *val)
493 {
494
495 /* handle is a pci_dev */
496 *val = pci_read_config((device_t)handle, where, sizeof(*val));
497 return (0);
498 }
499
500 int
501 al_eth_write_pci_config(void *handle, int where, uint32_t val)
502 {
503
504 /* handle is a pci_dev */
505 pci_write_config((device_t)handle, where, val, sizeof(val));
506 return (0);
507 }
508
509 void
510 al_eth_irq_config(uint32_t *offset, uint32_t value)
511 {
512
513 al_reg_write32_relaxed(offset, value);
514 }
515
516 void
517 al_eth_forward_int_config(uint32_t *offset, uint32_t value)
518 {
519
520 al_reg_write32(offset, value);
521 }
522
523 static void
524 al_eth_serdes_init(struct al_eth_adapter *adapter)
525 {
526 void __iomem *serdes_base;
527
528 adapter->serdes_init = false;
529
530 serdes_base = alpine_serdes_resource_get(adapter->serdes_grp);
531 if (serdes_base == NULL) {
532 device_printf(adapter->dev, "serdes_base get failed!\n");
533 return;
534 }
535
536 serdes_base = al_bus_dma_to_va(serdes_tag, serdes_base);
537
538 al_serdes_handle_grp_init(serdes_base, adapter->serdes_grp,
539 &adapter->serdes_obj);
540
541 adapter->serdes_init = true;
542 }
543
544 static void
545 al_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
546 {
547 bus_addr_t *paddr;
548
549 paddr = arg;
550 *paddr = segs->ds_addr;
551 }
552
553 static int
554 al_dma_alloc_coherent(device_t dev, bus_dma_tag_t *tag, bus_dmamap_t *map,
555 bus_addr_t *baddr, void **vaddr, uint32_t size)
556 {
557 int ret;
558 uint32_t maxsize = ((size - 1)/PAGE_SIZE + 1) * PAGE_SIZE;
559
560 ret = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
561 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
562 maxsize, 1, maxsize, BUS_DMA_COHERENT, NULL, NULL, tag);
563 if (ret != 0) {
564 device_printf(dev,
565 "failed to create bus tag, ret = %d\n", ret);
566 return (ret);
567 }
568
569 ret = bus_dmamem_alloc(*tag, vaddr,
570 BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
571 if (ret != 0) {
572 device_printf(dev,
573 "failed to allocate dmamem, ret = %d\n", ret);
574 return (ret);
575 }
576
577 ret = bus_dmamap_load(*tag, *map, *vaddr,
578 size, al_dma_map_addr, baddr, 0);
579 if (ret != 0) {
580 device_printf(dev,
581 "failed to allocate bus_dmamap_load, ret = %d\n", ret);
582 return (ret);
583 }
584
585 return (0);
586 }
587
588 static void
589 al_dma_free_coherent(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr)
590 {
591
592 bus_dmamap_unload(tag, map);
593 bus_dmamem_free(tag, vaddr, map);
594 bus_dma_tag_destroy(tag);
595 }
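/*
 * Usage sketch for the two helpers above (illustrative only; the variable
 * names below are hypothetical and not part of this driver):
 *
 *	bus_dma_tag_t tag;
 *	bus_dmamap_t map;
 *	bus_addr_t paddr;
 *	void *vaddr;
 *
 *	if (al_dma_alloc_coherent(dev, &tag, &map, &paddr, &vaddr, size) == 0) {
 *		... hand paddr to the hardware, access the ring via vaddr ...
 *		al_dma_free_coherent(tag, map, vaddr);
 *	}
 */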
596
597 static void
598 al_eth_mac_table_unicast_add(struct al_eth_adapter *adapter,
599 uint8_t idx, uint8_t udma_mask)
600 {
601 struct al_eth_fwd_mac_table_entry entry = { { 0 } };
602
603 memcpy(entry.addr, adapter->mac_addr, sizeof(adapter->mac_addr));
604
605 memset(entry.mask, 0xff, sizeof(entry.mask));
606 entry.rx_valid = true;
607 entry.tx_valid = false;
608 entry.udma_mask = udma_mask;
609 entry.filter = false;
610
611 device_printf_dbg(adapter->dev,
612 "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
613 __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));
614
615 al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
616 }
617
618 static void
619 al_eth_mac_table_all_multicast_add(struct al_eth_adapter *adapter, uint8_t idx,
620 uint8_t udma_mask)
621 {
622 struct al_eth_fwd_mac_table_entry entry = { { 0 } };
623
624 memset(entry.addr, 0x00, sizeof(entry.addr));
625 memset(entry.mask, 0x00, sizeof(entry.mask));
626 entry.mask[0] |= 1;
627 entry.addr[0] |= 1;
628
629 entry.rx_valid = true;
630 entry.tx_valid = false;
631 entry.udma_mask = udma_mask;
632 entry.filter = false;
633
634 device_printf_dbg(adapter->dev,
635 "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
636 __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));
637
638 al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
639 }
640
641 static void
642 al_eth_mac_table_broadcast_add(struct al_eth_adapter *adapter,
643 uint8_t idx, uint8_t udma_mask)
644 {
645 struct al_eth_fwd_mac_table_entry entry = { { 0 } };
646
647 memset(entry.addr, 0xff, sizeof(entry.addr));
648 memset(entry.mask, 0xff, sizeof(entry.mask));
649
650 entry.rx_valid = true;
651 entry.tx_valid = false;
652 entry.udma_mask = udma_mask;
653 entry.filter = false;
654
655 device_printf_dbg(adapter->dev,
656 "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
657 __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));
658
659 al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
660 }
661
662 static void
663 al_eth_mac_table_promiscuous_set(struct al_eth_adapter *adapter,
664 bool promiscuous)
665 {
666 struct al_eth_fwd_mac_table_entry entry = { { 0 } };
667
668 memset(entry.addr, 0x00, sizeof(entry.addr));
669 memset(entry.mask, 0x00, sizeof(entry.mask));
670
671 entry.rx_valid = true;
672 entry.tx_valid = false;
673 entry.udma_mask = (promiscuous) ? 1 : 0;
674 entry.filter = (promiscuous) ? false : true;
675
676 device_printf_dbg(adapter->dev, "%s: %s promiscuous mode\n",
677 __func__, (promiscuous) ? "enter" : "exit");
678
679 al_eth_fwd_mac_table_set(&adapter->hal_adapter,
680 AL_ETH_MAC_TABLE_DROP_IDX, &entry);
681 }
682
683 static void
684 al_eth_set_thash_table_entry(struct al_eth_adapter *adapter, uint8_t idx,
685 uint8_t udma, uint32_t queue)
686 {
687
688 if (udma != 0)
689 panic("only UDMA0 is supporter");
690
691 if (queue >= AL_ETH_NUM_QUEUES)
692 panic("invalid queue number");
693
694 al_eth_thash_table_set(&adapter->hal_adapter, idx, udma, queue);
695 }
696
697 /* Init the RX FSM table; no tunneling supported yet. If the packet is TCP/UDP over IPv4/IPv6, use a 4-tuple hash. */
698 static void
699 al_eth_fsm_table_init(struct al_eth_adapter *adapter)
700 {
701 uint32_t val;
702 int i;
703
704 for (i = 0; i < AL_ETH_RX_FSM_TABLE_SIZE; i++) {
705 uint8_t outer_type = AL_ETH_FSM_ENTRY_OUTER(i);
706 switch (outer_type) {
707 case AL_ETH_FSM_ENTRY_IPV4_TCP:
708 case AL_ETH_FSM_ENTRY_IPV4_UDP:
709 case AL_ETH_FSM_ENTRY_IPV6_TCP:
710 case AL_ETH_FSM_ENTRY_IPV6_UDP:
711 val = AL_ETH_FSM_DATA_OUTER_4_TUPLE |
712 AL_ETH_FSM_DATA_HASH_SEL;
713 break;
714 case AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP:
715 case AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP:
716 val = AL_ETH_FSM_DATA_OUTER_2_TUPLE |
717 AL_ETH_FSM_DATA_HASH_SEL;
718 break;
719 default:
720 val = AL_ETH_FSM_DATA_DEFAULT_Q |
721 AL_ETH_FSM_DATA_DEFAULT_UDMA;
722 }
723 al_eth_fsm_table_set(&adapter->hal_adapter, i, val);
724 }
725 }
726
727 static void
728 al_eth_mac_table_entry_clear(struct al_eth_adapter *adapter,
729 uint8_t idx)
730 {
731 struct al_eth_fwd_mac_table_entry entry = { { 0 } };
732
733 device_printf_dbg(adapter->dev, "%s: clear entry %d\n", __func__, idx);
734
735 al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
736 }
737
738 static int
739 al_eth_hw_init_adapter(struct al_eth_adapter *adapter)
740 {
741 struct al_eth_adapter_params *params = &adapter->eth_hal_params;
742 int rc;
743
744 /* params->dev_id = adapter->dev_id; */
745 params->rev_id = adapter->rev_id;
746 params->udma_id = 0;
747 params->enable_rx_parser = 1; /* enable rx epe parser */
748 params->udma_regs_base = adapter->udma_base; /* UDMA register base address */
749 params->ec_regs_base = adapter->ec_base; /* Ethernet controller registers base address */
750 params->mac_regs_base = adapter->mac_base; /* Ethernet MAC registers base address */
751 params->name = adapter->name;
752 params->serdes_lane = adapter->serdes_lane;
753
754 rc = al_eth_adapter_init(&adapter->hal_adapter, params);
755 if (rc != 0)
756 device_printf(adapter->dev, "%s failed at hal init!\n",
757 __func__);
758
759 if ((adapter->board_type == ALPINE_NIC) ||
760 (adapter->board_type == ALPINE_FPGA_NIC)) {
761 /* in PCIe NIC mode, force the eth UDMA to access PCIE0 using the target-id (tgtid) */
762 struct al_udma_gen_tgtid_conf conf;
763 int i;
764 for (i = 0; i < DMA_MAX_Q; i++) {
765 conf.tx_q_conf[i].queue_en = AL_TRUE;
766 conf.tx_q_conf[i].desc_en = AL_FALSE;
767 conf.tx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
768 conf.rx_q_conf[i].queue_en = AL_TRUE;
769 conf.rx_q_conf[i].desc_en = AL_FALSE;
770 conf.rx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
771 }
772 al_udma_gen_tgtid_conf_set(adapter->udma_base, &conf);
773 }
774
775 return (rc);
776 }
777
778 static void
779 al_eth_lm_config(struct al_eth_adapter *adapter)
780 {
781 struct al_eth_lm_init_params params = {0};
782
783 params.adapter = &adapter->hal_adapter;
784 params.serdes_obj = &adapter->serdes_obj;
785 params.lane = adapter->serdes_lane;
786 params.sfp_detection = adapter->sfp_detection_needed;
787 if (adapter->sfp_detection_needed == true) {
788 params.sfp_bus_id = adapter->i2c_adapter_id;
789 params.sfp_i2c_addr = SFP_I2C_ADDR;
790 }
791
792 if (adapter->sfp_detection_needed == false) {
793 switch (adapter->mac_mode) {
794 case AL_ETH_MAC_MODE_10GbE_Serial:
795 if ((adapter->lt_en != 0) && (adapter->an_en != 0))
796 params.default_mode = AL_ETH_LM_MODE_10G_DA;
797 else
798 params.default_mode = AL_ETH_LM_MODE_10G_OPTIC;
799 break;
800 case AL_ETH_MAC_MODE_SGMII:
801 params.default_mode = AL_ETH_LM_MODE_1G;
802 break;
803 default:
804 params.default_mode = AL_ETH_LM_MODE_10G_DA;
805 }
806 } else
807 params.default_mode = AL_ETH_LM_MODE_10G_DA;
808
809 params.link_training = adapter->lt_en;
810 params.rx_equal = true;
811 params.static_values = !adapter->dont_override_serdes;
812 params.i2c_context = adapter;
813 params.kr_fec_enable = false;
814
815 params.retimer_exist = adapter->retimer.exist;
816 params.retimer_bus_id = adapter->retimer.bus_id;
817 params.retimer_i2c_addr = adapter->retimer.i2c_addr;
818 params.retimer_channel = adapter->retimer.channel;
819
820 al_eth_lm_init(&adapter->lm_context, &params);
821 }
822
823 static int
824 al_eth_board_params_init(struct al_eth_adapter *adapter)
825 {
826
827 if (adapter->board_type == ALPINE_NIC) {
828 adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
829 adapter->sfp_detection_needed = false;
830 adapter->phy_exist = false;
831 adapter->an_en = false;
832 adapter->lt_en = false;
833 adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
834 adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
835 } else if (adapter->board_type == ALPINE_FPGA_NIC) {
836 adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
837 adapter->sfp_detection_needed = false;
838 adapter->phy_exist = false;
839 adapter->an_en = false;
840 adapter->lt_en = false;
841 adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
842 adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
843 } else {
844 struct al_eth_board_params params;
845 int rc;
846
847 adapter->auto_speed = false;
848
849 rc = al_eth_board_params_get(adapter->mac_base, &params);
850 if (rc != 0) {
851 device_printf(adapter->dev,
852 "board info not available\n");
853 return (-1);
854 }
855
856 adapter->phy_exist = params.phy_exist == true;
857 adapter->phy_addr = params.phy_mdio_addr;
858 adapter->an_en = params.autoneg_enable;
859 adapter->lt_en = params.kr_lt_enable;
860 adapter->serdes_grp = params.serdes_grp;
861 adapter->serdes_lane = params.serdes_lane;
862 adapter->sfp_detection_needed = params.sfp_plus_module_exist;
863 adapter->i2c_adapter_id = params.i2c_adapter_id;
864 adapter->ref_clk_freq = params.ref_clk_freq;
865 adapter->dont_override_serdes = params.dont_override_serdes;
866 adapter->link_config.active_duplex = !params.half_duplex;
867 adapter->link_config.autoneg = !params.an_disable;
868 adapter->link_config.force_1000_base_x = params.force_1000_base_x;
869 adapter->retimer.exist = params.retimer_exist;
870 adapter->retimer.bus_id = params.retimer_bus_id;
871 adapter->retimer.i2c_addr = params.retimer_i2c_addr;
872 adapter->retimer.channel = params.retimer_channel;
873
874 switch (params.speed) {
875 default:
876 device_printf(adapter->dev,
877 "%s: invalid speed (%d)\n", __func__, params.speed);
878 case AL_ETH_BOARD_1G_SPEED_1000M:
879 adapter->link_config.active_speed = 1000;
880 break;
881 case AL_ETH_BOARD_1G_SPEED_100M:
882 adapter->link_config.active_speed = 100;
883 break;
884 case AL_ETH_BOARD_1G_SPEED_10M:
885 adapter->link_config.active_speed = 10;
886 break;
887 }
888
889 switch (params.mdio_freq) {
890 default:
891 device_printf(adapter->dev,
892 "%s: invalid mdio freq (%d)\n", __func__,
893 params.mdio_freq);
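		/* FALLTHROUGH -- default to the 2.5 MHz MDIO frequency */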
894 case AL_ETH_BOARD_MDIO_FREQ_2_5_MHZ:
895 adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
896 break;
897 case AL_ETH_BOARD_MDIO_FREQ_1_MHZ:
898 adapter->mdio_freq = AL_ETH_MDIO_FREQ_1000_KHZ;
899 break;
900 }
901
902 switch (params.media_type) {
903 case AL_ETH_BOARD_MEDIA_TYPE_RGMII:
904 if (params.sfp_plus_module_exist == true)
905 /* Backward compatibility */
906 adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
907 else
908 adapter->mac_mode = AL_ETH_MAC_MODE_RGMII;
909
910 adapter->use_lm = false;
911 break;
912 case AL_ETH_BOARD_MEDIA_TYPE_SGMII:
913 adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
914 adapter->use_lm = true;
915 break;
916 case AL_ETH_BOARD_MEDIA_TYPE_10GBASE_SR:
917 adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
918 adapter->use_lm = true;
919 break;
920 case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT:
921 adapter->sfp_detection_needed = true;
922 adapter->auto_speed = false;
923 adapter->use_lm = true;
924 break;
925 case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT_AUTO_SPEED:
926 adapter->sfp_detection_needed = true;
927 adapter->auto_speed = true;
928 adapter->mac_mode_set = false;
929 adapter->use_lm = true;
930
931 adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
932 break;
933 default:
934 device_printf(adapter->dev,
935 "%s: unsupported media type %d\n",
936 __func__, params.media_type);
937 return (-1);
938 }
939
940 device_printf(adapter->dev,
941 "Board info: phy exist %s. phy addr %d. mdio freq %u Khz. "
942 "SFP connected %s. media %d\n",
943 params.phy_exist ? "Yes" : "No",
944 params.phy_mdio_addr, adapter->mdio_freq,
945 params.sfp_plus_module_exist ? "Yes" : "No",
946 params.media_type);
947 }
948
949 al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);
950
951 return (0);
952 }
953
954 static int
955 al_eth_function_reset(struct al_eth_adapter *adapter)
956 {
957 struct al_eth_board_params params;
958 int rc;
959
960 /* save board params so we can restore them after reset */
961 al_eth_board_params_get(adapter->mac_base, &params);
962 al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);
963 if (adapter->board_type == ALPINE_INTEGRATED)
964 rc = al_eth_flr_rmn(&al_eth_read_pci_config,
965 &al_eth_write_pci_config,
966 adapter->dev, adapter->mac_base);
967 else
968 rc = al_eth_flr_rmn(&al_eth_fpga_read_pci_config,
969 &al_eth_fpga_write_pci_config,
970 adapter->internal_pcie_base, adapter->mac_base);
971
972 /* restore params */
973 al_eth_board_params_set(adapter->mac_base, &params);
974 al_eth_mac_addr_store(adapter->ec_base, 0, adapter->mac_addr);
975
976 return (rc);
977 }
978
979 static void
980 al_eth_init_rings(struct al_eth_adapter *adapter)
981 {
982 int i;
983
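	/*
	 * Each ring caches the offset of its IOFIC unmask register and the
	 * value it writes there to re-enable its interrupt after processing:
	 * TX completion interrupts are in group C and RX interrupts in
	 * group B of the primary IOFIC level.
	 */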
984 for (i = 0; i < adapter->num_tx_queues; i++) {
985 struct al_eth_ring *ring = &adapter->tx_ring[i];
986
987 ring->ring_id = i;
988 ring->dev = adapter->dev;
989 ring->adapter = adapter;
990 ring->netdev = adapter->netdev;
991 al_udma_q_handle_get(&adapter->hal_adapter.tx_udma, i,
992 &ring->dma_q);
993 ring->sw_count = adapter->tx_ring_count;
994 ring->hw_count = adapter->tx_descs_count;
995 ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get((struct unit_regs *)adapter->udma_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
996 ring->unmask_val = ~(1 << i);
997 }
998
999 for (i = 0; i < adapter->num_rx_queues; i++) {
1000 struct al_eth_ring *ring = &adapter->rx_ring[i];
1001
1002 ring->ring_id = i;
1003 ring->dev = adapter->dev;
1004 ring->adapter = adapter;
1005 ring->netdev = adapter->netdev;
1006 al_udma_q_handle_get(&adapter->hal_adapter.rx_udma, i, &ring->dma_q);
1007 ring->sw_count = adapter->rx_ring_count;
1008 ring->hw_count = adapter->rx_descs_count;
1009 ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get(
1010 (struct unit_regs *)adapter->udma_base,
1011 AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
1012 ring->unmask_val = ~(1 << i);
1013 }
1014 }
1015
1016 static void
1017 al_init_locked(void *arg)
1018 {
1019 struct al_eth_adapter *adapter = arg;
1020 if_t ifp = adapter->netdev;
1021 int rc = 0;
1022
1023 al_eth_down(adapter);
1024 rc = al_eth_up(adapter);
1025
1026 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1027 if (rc == 0)
1028 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1029 }
1030
1031 static void
1032 al_init(void *arg)
1033 {
1034 struct al_eth_adapter *adapter = arg;
1035
1036 al_init_locked(adapter);
1037 }
1038
1039 static inline int
1040 al_eth_alloc_rx_buf(struct al_eth_adapter *adapter,
1041 struct al_eth_ring *rx_ring,
1042 struct al_eth_rx_buffer *rx_info)
1043 {
1044 struct al_buf *al_buf;
1045 bus_dma_segment_t segs[2];
1046 int error;
1047 int nsegs;
1048
1049 if (rx_info->m != NULL)
1050 return (0);
1051
1052 rx_info->data_size = adapter->rx_mbuf_sz;
1053
1054 AL_RX_LOCK(adapter);
1055
1056 /* Get mbuf using UMA allocator */
1057 rx_info->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1058 rx_info->data_size);
1059 AL_RX_UNLOCK(adapter);
1060
1061 if (rx_info->m == NULL)
1062 return (ENOMEM);
1063
1064 rx_info->m->m_pkthdr.len = rx_info->m->m_len = adapter->rx_mbuf_sz;
1065
1066 /* Map packets for DMA */
1067 error = bus_dmamap_load_mbuf_sg(rx_ring->dma_buf_tag, rx_info->dma_map,
1068 rx_info->m, segs, &nsegs, BUS_DMA_NOWAIT);
1069 if (__predict_false(error)) {
1070 device_printf(rx_ring->dev, "failed to map mbuf, error = %d\n",
1071 error);
1072 m_freem(rx_info->m);
1073 rx_info->m = NULL;
1074 return (EFAULT);
1075 }
1076
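	/*
	 * Advance the DMA address by AL_IP_ALIGNMENT_OFFSET (2 bytes) so the
	 * IP header of the received frame ends up 32-bit aligned.
	 */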
1077 al_buf = &rx_info->al_buf;
1078 al_buf->addr = segs[0].ds_addr + AL_IP_ALIGNMENT_OFFSET;
1079 al_buf->len = rx_info->data_size - AL_IP_ALIGNMENT_OFFSET;
1080
1081 return (0);
1082 }
1083
1084 static int
1085 al_eth_refill_rx_bufs(struct al_eth_adapter *adapter, unsigned int qid,
1086 unsigned int num)
1087 {
1088 struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
1089 uint16_t next_to_use;
1090 unsigned int i;
1091
1092 next_to_use = rx_ring->next_to_use;
1093
1094 for (i = 0; i < num; i++) {
1095 int rc;
1096 struct al_eth_rx_buffer *rx_info =
1097 &rx_ring->rx_buffer_info[next_to_use];
1098
1099 if (__predict_false(al_eth_alloc_rx_buf(adapter,
1100 rx_ring, rx_info) < 0)) {
1101 device_printf(adapter->dev,
1102 "failed to alloc buffer for rx queue %d\n", qid);
1103 break;
1104 }
1105
1106 rc = al_eth_rx_buffer_add(rx_ring->dma_q,
1107 &rx_info->al_buf, AL_ETH_RX_FLAGS_INT, NULL);
1108 if (__predict_false(rc)) {
1109 device_printf(adapter->dev,
1110 "failed to add buffer for rx queue %d\n", qid);
1111 break;
1112 }
1113
1114 next_to_use = AL_ETH_RX_RING_IDX_NEXT(rx_ring, next_to_use);
1115 }
1116
1117 if (__predict_false(i < num))
1118 device_printf(adapter->dev,
1119 "refilled rx queue %d with %d pages only - available %d\n",
1120 qid, i, al_udma_available_get(rx_ring->dma_q));
1121
1122 if (__predict_true(i))
1123 al_eth_rx_buffer_action(rx_ring->dma_q, i);
1124
1125 rx_ring->next_to_use = next_to_use;
1126
1127 return (i);
1128 }
1129
1130 /*
1131 * al_eth_refill_all_rx_bufs - refill Rx buffers of all queues
1132 * @adapter: board private structure
1133 */
1134 static void
1135 al_eth_refill_all_rx_bufs(struct al_eth_adapter *adapter)
1136 {
1137 int i;
1138
1139 for (i = 0; i < adapter->num_rx_queues; i++)
1140 al_eth_refill_rx_bufs(adapter, i, AL_ETH_DEFAULT_RX_DESCS - 1);
1141 }
1142
1143 static void
1144 al_eth_tx_do_cleanup(struct al_eth_ring *tx_ring)
1145 {
1146 unsigned int total_done;
1147 uint16_t next_to_clean;
1148 int qid = tx_ring->ring_id;
1149
1150 total_done = al_eth_comp_tx_get(tx_ring->dma_q);
1151 device_printf_dbg(tx_ring->dev,
1152 "tx_poll: q %d total completed descs %x\n", qid, total_done);
1153 next_to_clean = tx_ring->next_to_clean;
1154
1155 while (total_done != 0) {
1156 struct al_eth_tx_buffer *tx_info;
1157 struct mbuf *mbuf;
1158
1159 tx_info = &tx_ring->tx_buffer_info[next_to_clean];
1160 /* stop if not all descriptors of the packet are completed */
1161 if (tx_info->tx_descs > total_done)
1162 break;
1163
1164 mbuf = tx_info->m;
1165
1166 tx_info->m = NULL;
1167
1168 device_printf_dbg(tx_ring->dev,
1169 "tx_poll: q %d mbuf %p completed\n", qid, mbuf);
1170
1171 /* map is no longer required */
1172 bus_dmamap_unload(tx_ring->dma_buf_tag, tx_info->dma_map);
1173
1174 m_freem(mbuf);
1175 total_done -= tx_info->tx_descs;
1176 next_to_clean = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_clean);
1177 }
1178
1179 tx_ring->next_to_clean = next_to_clean;
1180
1181 device_printf_dbg(tx_ring->dev, "tx_poll: q %d done next to clean %x\n",
1182 qid, next_to_clean);
1183
1184 /*
1185 * Make the ring's circular-buffer update visible to
1186 * al_eth_start_xmit() before the queue's stall state is re-checked.
1187 */
1188 al_smp_data_memory_barrier();
1189 }
1190
1191 static void
1192 al_eth_tx_csum(struct al_eth_ring *tx_ring, struct al_eth_tx_buffer *tx_info,
1193 struct al_eth_pkt *hal_pkt, struct mbuf *m)
1194 {
1195 uint32_t mss = m->m_pkthdr.tso_segsz;
1196 struct ether_vlan_header *eh;
1197 uint16_t etype;
1198 #ifdef INET
1199 struct ip *ip;
1200 #endif
1201 #ifdef INET6
1202 struct ip6_hdr *ip6;
1203 #endif
1204 struct tcphdr *th = NULL;
1205 int ehdrlen, ip_hlen = 0;
1206 uint8_t ipproto = 0;
1207 uint32_t offload = 0;
1208
1209 if (mss != 0)
1210 offload = 1;
1211
1212 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0)
1213 offload = 1;
1214
1215 if ((m->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0)
1216 offload = 1;
1217
1218 if (offload != 0) {
1219 struct al_eth_meta_data *meta = &tx_ring->hal_meta;
1220
1221 if (mss != 0)
1222 hal_pkt->flags |= (AL_ETH_TX_FLAGS_TSO |
1223 AL_ETH_TX_FLAGS_L4_CSUM);
1224 else
1225 hal_pkt->flags |= (AL_ETH_TX_FLAGS_L4_CSUM |
1226 AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM);
1227
1228 /*
1229 * Determine where frame payload starts.
1230 * Jump over vlan headers if already present,
1231 * helpful for QinQ too.
1232 */
1233 eh = mtod(m, struct ether_vlan_header *);
1234 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1235 etype = ntohs(eh->evl_proto);
1236 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1237 } else {
1238 etype = ntohs(eh->evl_encap_proto);
1239 ehdrlen = ETHER_HDR_LEN;
1240 }
1241
1242 switch (etype) {
1243 #ifdef INET
1244 case ETHERTYPE_IP:
1245 ip = (struct ip *)(m->m_data + ehdrlen);
1246 ip_hlen = ip->ip_hl << 2;
1247 ipproto = ip->ip_p;
1248 hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv4;
1249 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
1250 if (mss != 0)
1251 hal_pkt->flags |= AL_ETH_TX_FLAGS_IPV4_L3_CSUM;
1252 if (ipproto == IPPROTO_TCP)
1253 hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
1254 else
1255 hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
1256 break;
1257 #endif /* INET */
1258 #ifdef INET6
1259 case ETHERTYPE_IPV6:
1260 ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1261 hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv6;
1262 ip_hlen = sizeof(struct ip6_hdr);
1263 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
1264 ipproto = ip6->ip6_nxt;
1265 if (ipproto == IPPROTO_TCP)
1266 hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
1267 else
1268 hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
1269 break;
1270 #endif /* INET6 */
1271 default:
1272 break;
1273 }
1274
1275 meta->words_valid = 4;
1276 meta->l3_header_len = ip_hlen;
1277 meta->l3_header_offset = ehdrlen;
1278 if (th != NULL)
1279 meta->l4_header_len = th->th_off; /* this param needed only for TSO */
1280 meta->mss_idx_sel = 0; /* check how to select MSS */
1281 meta->mss_val = mss;
1282 hal_pkt->meta = meta;
1283 } else
1284 hal_pkt->meta = NULL;
1285 }
1286
1287 #define XMIT_QUEUE_TIMEOUT 100
1288
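/*
 * TX back-pressure: al_eth_xmit_mbuf() marks a ring as stalled when fewer
 * than AL_ETH_PKT_MAX_BUFS + 2 descriptors are left and then waits, up to
 * XMIT_QUEUE_TIMEOUT ticks, for the completion path to free enough
 * descriptors before transmitting again.
 */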
1289 static void
1290 al_eth_xmit_mbuf(struct al_eth_ring *tx_ring, struct mbuf *m)
1291 {
1292 struct al_eth_tx_buffer *tx_info;
1293 int error;
1294 int nsegs, a;
1295 uint16_t next_to_use;
1296 bus_dma_segment_t segs[AL_ETH_PKT_MAX_BUFS + 1];
1297 struct al_eth_pkt *hal_pkt;
1298 struct al_buf *al_buf;
1299 bool remap;
1300
1301 /* Check if queue is ready */
1302 if (unlikely(tx_ring->stall != 0)) {
1303 for (a = 0; a < XMIT_QUEUE_TIMEOUT; a++) {
1304 if (al_udma_available_get(tx_ring->dma_q) >=
1305 (AL_ETH_DEFAULT_TX_HW_DESCS -
1306 AL_ETH_TX_WAKEUP_THRESH)) {
1307 tx_ring->stall = 0;
1308 break;
1309 }
1310 pause("stall", 1);
1311 }
1312 if (a == XMIT_QUEUE_TIMEOUT) {
1313 device_printf(tx_ring->dev,
1314 "timeout waiting for queue %d ready!\n",
1315 tx_ring->ring_id);
1316 return;
1317 } else {
1318 device_printf_dbg(tx_ring->dev,
1319 "queue %d is ready!\n", tx_ring->ring_id);
1320 }
1321 }
1322
1323 next_to_use = tx_ring->next_to_use;
1324 tx_info = &tx_ring->tx_buffer_info[next_to_use];
1325 tx_info->m = m;
1326 hal_pkt = &tx_info->hal_pkt;
1327
1328 if (m == NULL) {
1329 device_printf(tx_ring->dev, "mbuf is NULL\n");
1330 return;
1331 }
1332
1333 remap = true;
1334 /* Map packets for DMA */
1335 retry:
1336 error = bus_dmamap_load_mbuf_sg(tx_ring->dma_buf_tag, tx_info->dma_map,
1337 m, segs, &nsegs, BUS_DMA_NOWAIT);
1338 if (__predict_false(error)) {
1339 struct mbuf *m_new;
1340
1341 if (error == EFBIG) {
1342 /* Try it again? - one try */
1343 if (remap == true) {
1344 remap = false;
1345 m_new = m_defrag(m, M_NOWAIT);
1346 if (m_new == NULL) {
1347 device_printf(tx_ring->dev,
1348 "failed to defrag mbuf\n");
1349 goto exit;
1350 }
1351 m = m_new;
1352 goto retry;
1353 } else {
1354 device_printf(tx_ring->dev,
1355 "failed to map mbuf, error %d\n", error);
1356 goto exit;
1357 }
1358 } else {
1359 device_printf(tx_ring->dev,
1360 "failed to map mbuf, error %d\n", error);
1361 goto exit;
1362 }
1363 }
1364
1365 /* set flags and meta data */
1366 hal_pkt->flags = AL_ETH_TX_FLAGS_INT;
1367 al_eth_tx_csum(tx_ring, tx_info, hal_pkt, m);
1368
1369 al_buf = hal_pkt->bufs;
1370 for (a = 0; a < nsegs; a++) {
1371 al_buf->addr = segs[a].ds_addr;
1372 al_buf->len = segs[a].ds_len;
1373
1374 al_buf++;
1375 }
1376
1377 hal_pkt->num_of_bufs = nsegs;
1378
1379 /* prepare the packet's descriptors to dma engine */
1380 tx_info->tx_descs = al_eth_tx_pkt_prepare(tx_ring->dma_q, hal_pkt);
1381
1382 if (tx_info->tx_descs == 0)
1383 goto exit;
1384
1385 /*
1386 * stop the queue when no more space available, the packet can have up
1387 * to AL_ETH_PKT_MAX_BUFS + 1 buffers and a meta descriptor
1388 */
1389 if (unlikely(al_udma_available_get(tx_ring->dma_q) <
1390 (AL_ETH_PKT_MAX_BUFS + 2))) {
1391 tx_ring->stall = 1;
1392 device_printf_dbg(tx_ring->dev, "stall, stopping queue %d...\n",
1393 tx_ring->ring_id);
1394 al_data_memory_barrier();
1395 }
1396
1397 tx_ring->next_to_use = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_use);
1398
1399 /* trigger the dma engine */
1400 al_eth_tx_dma_action(tx_ring->dma_q, tx_info->tx_descs);
1401 return;
1402
1403 exit:
1404 m_freem(m);
1405 }
1406
1407 static void
1408 al_eth_tx_cmpl_work(void *arg, int pending)
1409 {
1410 struct al_eth_ring *tx_ring = arg;
1411
1412 if (napi != 0) {
1413 tx_ring->cmpl_is_running = 1;
1414 al_data_memory_barrier();
1415 }
1416
1417 al_eth_tx_do_cleanup(tx_ring);
1418
1419 if (napi != 0) {
1420 tx_ring->cmpl_is_running = 0;
1421 al_data_memory_barrier();
1422 }
1423 /* all work done, enable IRQs */
1424 al_eth_irq_config(tx_ring->unmask_reg_offset, tx_ring->unmask_val);
1425 }
1426
1427 static int
1428 al_eth_tx_cmlp_irq_filter(void *arg)
1429 {
1430 struct al_eth_ring *tx_ring = arg;
1431
1432 /* Interrupt should be auto-masked upon arrival */
1433
1434 device_printf_dbg(tx_ring->dev, "%s for ring ID = %d\n", __func__,
1435 tx_ring->ring_id);
1436
1437 /*
1438 * For napi, schedule the work only when it is not already running.
1439 * For regular (non-napi) packet handling, always schedule it.
1440 */
1441 if ((napi == 0) || (napi && tx_ring->cmpl_is_running == 0))
1442 taskqueue_enqueue(tx_ring->cmpl_tq, &tx_ring->cmpl_task);
1443
1444 /* Do not run bottom half */
1445 return (FILTER_HANDLED);
1446 }
1447
1448 static int
1449 al_eth_rx_recv_irq_filter(void *arg)
1450 {
1451 struct al_eth_ring *rx_ring = arg;
1452
1453 /* Interrupt should be auto-masked upon arrival */
1454
1455 device_printf_dbg(rx_ring->dev, "%s for ring ID = %d\n", __func__,
1456 rx_ring->ring_id);
1457
1458 /*
1459 * For napi, schedule the work only when it is not already running.
1460 * For regular (non-napi) packet handling, always schedule it.
1461 */
1462 if ((napi == 0) || (napi && rx_ring->enqueue_is_running == 0))
1463 taskqueue_enqueue(rx_ring->enqueue_tq, &rx_ring->enqueue_task);
1464
1465 /* Do not run bottom half */
1466 return (FILTER_HANDLED);
1467 }
1468
1469 /*
1470 * al_eth_rx_checksum - indicate in mbuf if hw indicated a good cksum
1471 * @adapter: structure containing adapter specific data
1472 * @hal_pkt: HAL structure for the packet
1473 * @mbuf: mbuf currently being received and modified
1474 */
1475 static inline void
1476 al_eth_rx_checksum(struct al_eth_adapter *adapter,
1477 struct al_eth_pkt *hal_pkt, struct mbuf *mbuf)
1478 {
1479
1480 /* if IPv4 and error */
1481 if (unlikely((if_getcapenable(adapter->netdev) & IFCAP_RXCSUM) &&
1482 (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv4) &&
1483 (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
1484 device_printf(adapter->dev,"rx ipv4 header checksum error\n");
1485 return;
1486 }
1487
1488 /* if IPv6 and error */
1489 if (unlikely((if_getcapenable(adapter->netdev) & IFCAP_RXCSUM_IPV6) &&
1490 (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv6) &&
1491 (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
1492 device_printf(adapter->dev,"rx ipv6 header checksum error\n");
1493 return;
1494 }
1495
1496 /* if TCP/UDP */
1497 if (likely((hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) ||
1498 (hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_UDP))) {
1499 if (unlikely(hal_pkt->flags & AL_ETH_RX_FLAGS_L4_CSUM_ERR)) {
1500 device_printf_dbg(adapter->dev, "rx L4 checksum error\n");
1501
1502 /* TCP/UDP checksum error */
1503 mbuf->m_pkthdr.csum_flags = 0;
1504 } else {
1505 device_printf_dbg(adapter->dev, "rx checksum correct\n");
1506
1507 /* IP Checksum Good */
1508 mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1509 mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1510 }
1511 }
1512 }
1513
1514 static struct mbuf*
1515 al_eth_rx_mbuf(struct al_eth_adapter *adapter,
1516 struct al_eth_ring *rx_ring, struct al_eth_pkt *hal_pkt,
1517 unsigned int descs, uint16_t *next_to_clean)
1518 {
1519 struct mbuf *mbuf;
1520 struct al_eth_rx_buffer *rx_info =
1521 &rx_ring->rx_buffer_info[*next_to_clean];
1522 unsigned int len;
1523
1524 len = hal_pkt->bufs[0].len;
1525 device_printf_dbg(adapter->dev, "rx_info %p data %p\n", rx_info,
1526 rx_info->m);
1527
1528 if (rx_info->m == NULL) {
1529 *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring,
1530 *next_to_clean);
1531 return (NULL);
1532 }
1533
1534 mbuf = rx_info->m;
1535 mbuf->m_pkthdr.len = len;
1536 mbuf->m_len = len;
1537 mbuf->m_pkthdr.rcvif = rx_ring->netdev;
1538 mbuf->m_flags |= M_PKTHDR;
1539
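	/*
	 * Small frames are copied into a freshly allocated header mbuf so the
	 * original receive buffer and its DMA mapping stay in place for
	 * reuse; larger frames hand the DMA buffer itself up the stack and a
	 * new one is mapped on the next refill.
	 */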
1540 if (len <= adapter->small_copy_len) {
1541 struct mbuf *smbuf;
1542 device_printf_dbg(adapter->dev, "rx small packet. len %d\n", len);
1543
1544 AL_RX_LOCK(adapter);
1545 smbuf = m_gethdr(M_NOWAIT, MT_DATA);
1546 AL_RX_UNLOCK(adapter);
1547 if (__predict_false(smbuf == NULL)) {
1548 device_printf(adapter->dev, "smbuf is NULL\n");
1549 return (NULL);
1550 }
1551
1552 smbuf->m_data = smbuf->m_data + AL_IP_ALIGNMENT_OFFSET;
1553 memcpy(smbuf->m_data, mbuf->m_data + AL_IP_ALIGNMENT_OFFSET, len);
1554
1555 smbuf->m_len = len;
1556 smbuf->m_pkthdr.rcvif = rx_ring->netdev;
1557
1558 /* first desc of a non-ps chain */
1559 smbuf->m_flags |= M_PKTHDR;
1560 smbuf->m_pkthdr.len = smbuf->m_len;
1561
1562 *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring,
1563 *next_to_clean);
1564
1565 return (smbuf);
1566 }
1567 mbuf->m_data = mbuf->m_data + AL_IP_ALIGNMENT_OFFSET;
1568
1569 /* Unmap the buffer */
1570 bus_dmamap_unload(rx_ring->dma_buf_tag, rx_info->dma_map);
1571
1572 rx_info->m = NULL;
1573 *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean);
1574
1575 return (mbuf);
1576 }
1577
1578 static void
1579 al_eth_rx_recv_work(void *arg, int pending)
1580 {
1581 struct al_eth_ring *rx_ring = arg;
1582 struct mbuf *mbuf;
1583 struct lro_entry *queued;
1584 unsigned int qid = rx_ring->ring_id;
1585 struct al_eth_pkt *hal_pkt = &rx_ring->hal_pkt;
1586 uint16_t next_to_clean = rx_ring->next_to_clean;
1587 uint32_t refill_required;
1588 uint32_t refill_actual;
1589 uint32_t do_if_input;
1590
1591 if (napi != 0) {
1592 rx_ring->enqueue_is_running = 1;
1593 al_data_memory_barrier();
1594 }
1595
1596 do {
1597 unsigned int descs;
1598
1599 descs = al_eth_pkt_rx(rx_ring->dma_q, hal_pkt);
1600 if (unlikely(descs == 0))
1601 break;
1602
1603 device_printf_dbg(rx_ring->dev, "rx_poll: q %d got packet "
1604 "from hal. descs %d\n", qid, descs);
1605 device_printf_dbg(rx_ring->dev, "rx_poll: q %d flags %x. "
1606 "l3 proto %d l4 proto %d\n", qid, hal_pkt->flags,
1607 hal_pkt->l3_proto_idx, hal_pkt->l4_proto_idx);
1608
1609 /* ignore if detected dma or eth controller errors */
1610 if ((hal_pkt->flags & (AL_ETH_RX_ERROR |
1611 AL_UDMA_CDESC_ERROR)) != 0) {
1612 device_printf(rx_ring->dev, "receive packet with error. "
1613 "flags = 0x%x\n", hal_pkt->flags);
1614 next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring,
1615 next_to_clean, descs);
1616 continue;
1617 }
1618
1619 /* allocate mbuf and fill it */
1620 mbuf = al_eth_rx_mbuf(rx_ring->adapter, rx_ring, hal_pkt, descs,
1621 &next_to_clean);
1622
1623 /* exit if we failed to retrieve a buffer */
1624 if (unlikely(mbuf == NULL)) {
1625 next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring,
1626 next_to_clean, descs);
1627 break;
1628 }
1629
1630 if (__predict_true(if_getcapenable(rx_ring->netdev) & IFCAP_RXCSUM ||
1631 if_getcapenable(rx_ring->netdev) & IFCAP_RXCSUM_IPV6)) {
1632 al_eth_rx_checksum(rx_ring->adapter, hal_pkt, mbuf);
1633 }
1634
1635 mbuf->m_pkthdr.flowid = qid;
1636 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
1637
1638 /*
1639 * LRO is only for IP/TCP packets and TCP checksum of the packet
1640 * should be computed by hardware.
1641 */
1642 do_if_input = 1;
1643 if ((rx_ring->lro_enabled != 0) &&
1644 ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) &&
1645 hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) {
1646 /*
1647 * Send to the stack if:
1648 * - LRO not enabled, or
1649 * - no LRO resources, or
1650 * - lro enqueue fails
1651 */
1652 if (rx_ring->lro.lro_cnt != 0) {
1653 if (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0)
1654 do_if_input = 0;
1655 }
1656 }
1657
1658 if (do_if_input)
1659 if_input(rx_ring->netdev, mbuf);
1660
1661 } while (1);
1662
1663 rx_ring->next_to_clean = next_to_clean;
1664
1665 refill_required = al_udma_available_get(rx_ring->dma_q);
1666 refill_actual = al_eth_refill_rx_bufs(rx_ring->adapter, qid,
1667 refill_required);
1668
1669 if (unlikely(refill_actual < refill_required)) {
1670 device_printf_dbg(rx_ring->dev,
1671 "%s: not filling rx queue %d\n", __func__, qid);
1672 }
1673
1674 while (((queued = LIST_FIRST(&rx_ring->lro.lro_active)) != NULL)) {
1675 LIST_REMOVE(queued, next);
1676 tcp_lro_flush(&rx_ring->lro, queued);
1677 }
1678
1679 if (napi != 0) {
1680 rx_ring->enqueue_is_running = 0;
1681 al_data_memory_barrier();
1682 }
1683 /* unmask irq */
1684 al_eth_irq_config(rx_ring->unmask_reg_offset, rx_ring->unmask_val);
1685 }
1686
1687 static void
1688 al_eth_start_xmit(void *arg, int pending)
1689 {
1690 struct al_eth_ring *tx_ring = arg;
1691 struct mbuf *mbuf;
1692
1693 if (napi != 0) {
1694 tx_ring->enqueue_is_running = 1;
1695 al_data_memory_barrier();
1696 }
1697
1698 while (1) {
1699 mtx_lock(&tx_ring->br_mtx);
1700 mbuf = drbr_dequeue(NULL, tx_ring->br);
1701 mtx_unlock(&tx_ring->br_mtx);
1702
1703 if (mbuf == NULL)
1704 break;
1705
1706 al_eth_xmit_mbuf(tx_ring, mbuf);
1707 }
1708
1709 if (napi != 0) {
1710 tx_ring->enqueue_is_running = 0;
1711 al_data_memory_barrier();
1712 while (1) {
1713 mtx_lock(&tx_ring->br_mtx);
1714 mbuf = drbr_dequeue(NULL, tx_ring->br);
1715 mtx_unlock(&tx_ring->br_mtx);
1716 if (mbuf == NULL)
1717 break;
1718 al_eth_xmit_mbuf(tx_ring, mbuf);
1719 }
1720 }
1721 }
1722
1723 static int
1724 al_mq_start(if_t ifp, struct mbuf *m)
1725 {
1726 struct al_eth_adapter *adapter = if_getsoftc(ifp);
1727 struct al_eth_ring *tx_ring;
1728 int i;
1729 int ret;
1730
1731 /* Which queue to use */
1732 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1733 i = m->m_pkthdr.flowid % adapter->num_tx_queues;
1734 else
1735 i = curcpu % adapter->num_tx_queues;
1736
1737 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
1738 IFF_DRV_RUNNING) {
1739 return (EFAULT);
1740 }
1741
1742 tx_ring = &adapter->tx_ring[i];
1743
1744 device_printf_dbg(adapter->dev, "dgb start() - assuming link is active, "
1745 "sending packet to queue %d\n", i);
1746
1747 ret = drbr_enqueue(ifp, tx_ring->br, m);
1748
1749 /*
1750 * For napi, schedule the work only when it is not already running.
1751 * For regular (non-napi) packet handling, always schedule it.
1752 */
1753 if ((napi == 0) || ((napi != 0) && (tx_ring->enqueue_is_running == 0)))
1754 taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
1755
1756 return (ret);
1757 }
1758
1759 static void
1760 al_qflush(if_t ifp)
1761 {
1762
1763 /* unused */
1764 }
1765
1766 static inline void
1767 al_eth_flow_ctrl_init(struct al_eth_adapter *adapter)
1768 {
1769 uint8_t default_flow_ctrl;
1770
1771 default_flow_ctrl = AL_ETH_FLOW_CTRL_TX_PAUSE;
1772 default_flow_ctrl |= AL_ETH_FLOW_CTRL_RX_PAUSE;
1773
1774 adapter->link_config.flow_ctrl_supported = default_flow_ctrl;
1775 }
1776
1777 static int
1778 al_eth_flow_ctrl_config(struct al_eth_adapter *adapter)
1779 {
1780 struct al_eth_flow_control_params *flow_ctrl_params;
1781 uint8_t active = adapter->link_config.flow_ctrl_active;
1782 int i;
1783
1784 flow_ctrl_params = &adapter->flow_ctrl_params;
1785
1786 flow_ctrl_params->type = AL_ETH_FLOW_CONTROL_TYPE_LINK_PAUSE;
1787 flow_ctrl_params->obay_enable =
1788 ((active & AL_ETH_FLOW_CTRL_RX_PAUSE) != 0);
1789 flow_ctrl_params->gen_enable =
1790 ((active & AL_ETH_FLOW_CTRL_TX_PAUSE) != 0);
1791
1792 flow_ctrl_params->rx_fifo_th_high = AL_ETH_FLOW_CTRL_RX_FIFO_TH_HIGH;
1793 flow_ctrl_params->rx_fifo_th_low = AL_ETH_FLOW_CTRL_RX_FIFO_TH_LOW;
1794 flow_ctrl_params->quanta = AL_ETH_FLOW_CTRL_QUANTA;
1795 flow_ctrl_params->quanta_th = AL_ETH_FLOW_CTRL_QUANTA_TH;
1796
1797 /* map priority to a queue bitmask, queue id = priority/2 */
1798 for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
1799 flow_ctrl_params->prio_q_map[0][i] = 1 << (i >> 1);
1800
1801 al_eth_flow_control_config(&adapter->hal_adapter, flow_ctrl_params);
1802
1803 return (0);
1804 }
1805
1806 static void
1807 al_eth_flow_ctrl_enable(struct al_eth_adapter *adapter)
1808 {
1809
1810 /*
1811 * change the active configuration to the default / force by ethtool
1812 * and call to configure
1813 */
1814 adapter->link_config.flow_ctrl_active =
1815 adapter->link_config.flow_ctrl_supported;
1816
1817 al_eth_flow_ctrl_config(adapter);
1818 }
1819
1820 static void
1821 al_eth_flow_ctrl_disable(struct al_eth_adapter *adapter)
1822 {
1823
1824 adapter->link_config.flow_ctrl_active = 0;
1825 al_eth_flow_ctrl_config(adapter);
1826 }
1827
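/*
 * One-time MAC bring-up: initialize the HAL adapter, set the MAC mode,
 * force link parameters for SGMII/RGMII-without-PHY configurations, and
 * configure the (shared, clause 22) MDIO interface.
 */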
1828 static int
1829 al_eth_hw_init(struct al_eth_adapter *adapter)
1830 {
1831 int rc;
1832
1833 rc = al_eth_hw_init_adapter(adapter);
1834 if (rc != 0)
1835 return (rc);
1836
1837 rc = al_eth_mac_config(&adapter->hal_adapter, adapter->mac_mode);
1838 if (rc < 0) {
1839 device_printf(adapter->dev, "%s failed to configure mac!\n",
1840 __func__);
1841 return (rc);
1842 }
1843
1844 if ((adapter->mac_mode == AL_ETH_MAC_MODE_SGMII) ||
1845 (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII &&
1846 adapter->phy_exist == false)) {
1847 rc = al_eth_mac_link_config(&adapter->hal_adapter,
1848 adapter->link_config.force_1000_base_x,
1849 adapter->link_config.autoneg,
1850 adapter->link_config.active_speed,
1851 adapter->link_config.active_duplex);
1852 if (rc != 0) {
1853 device_printf(adapter->dev,
1854 "%s failed to configure link parameters!\n",
1855 __func__);
1856 return (rc);
1857 }
1858 }
1859
1860 rc = al_eth_mdio_config(&adapter->hal_adapter,
1861 AL_ETH_MDIO_TYPE_CLAUSE_22, AL_TRUE /* shared_mdio_if */,
1862 adapter->ref_clk_freq, adapter->mdio_freq);
1863 if (rc != 0) {
1864 device_printf(adapter->dev, "%s failed at mdio config!\n",
1865 __func__);
1866 return (rc);
1867 }
1868
1869 al_eth_flow_ctrl_init(adapter);
1870
1871 return (rc);
1872 }
1873
1874 static int
1875 al_eth_hw_stop(struct al_eth_adapter *adapter)
1876 {
1877
1878 al_eth_mac_stop(&adapter->hal_adapter);
1879
1880 /*
1881 * wait until pending Rx packets are written and the UDMA becomes idle;
1882 * the MAC has a ~10KB FIFO, so 10us should be enough time for the
1883 * UDMA to write them to memory
1884 */
1885 DELAY(10);
1886
1887 al_eth_adapter_stop(&adapter->hal_adapter);
1888
1889 adapter->flags |= AL_ETH_FLAG_RESET_REQUESTED;
1890
1891 /* disable flow ctrl to avoid pause packets*/
1892 al_eth_flow_ctrl_disable(adapter);
1893
1894 return (0);
1895 }
1896
1897 /*
1898 * al_eth_intr_intx_all - Legacy Interrupt Handler for all interrupts
1899 * @irq: interrupt number
1900 * @data: pointer to a network interface device structure
1901 */
1902 static int
1903 al_eth_intr_intx_all(void *data)
1904 {
1905 struct al_eth_adapter *adapter = data;
1906
1907 struct unit_regs __iomem *regs_base =
1908 (struct unit_regs __iomem *)adapter->udma_base;
1909 uint32_t reg;
1910
1911 reg = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
1912 AL_INT_GROUP_A);
1913 if (likely(reg))
1914 device_printf_dbg(adapter->dev, "%s group A cause %x\n",
1915 __func__, reg);
1916
1917 if (unlikely(reg & AL_INT_GROUP_A_GROUP_D_SUM)) {
1918 struct al_iofic_grp_ctrl __iomem *sec_ints_base;
1919 uint32_t cause_d = al_udma_iofic_read_cause(regs_base,
1920 AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D);
1921
1922 sec_ints_base =
1923 &regs_base->gen.interrupt_regs.secondary_iofic_ctrl[0];
1924 if (cause_d != 0) {
1925 device_printf_dbg(adapter->dev,
1926 "got interrupt from group D. cause %x\n", cause_d);
1927
1928 cause_d = al_iofic_read_cause(sec_ints_base,
1929 AL_INT_GROUP_A);
1930 device_printf(adapter->dev,
1931 "secondary A cause %x\n", cause_d);
1932
1933 cause_d = al_iofic_read_cause(sec_ints_base,
1934 AL_INT_GROUP_B);
1935
1936 device_printf_dbg(adapter->dev,
1937 "secondary B cause %x\n", cause_d);
1938 }
1939 }
1940 if ((reg & AL_INT_GROUP_A_GROUP_B_SUM) != 0) {
1941 uint32_t cause_b = al_udma_iofic_read_cause(regs_base,
1942 AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
1943 int qid;
1944 device_printf_dbg(adapter->dev, "secondary B cause %x\n",
1945 cause_b);
1946 for (qid = 0; qid < adapter->num_rx_queues; qid++) {
1947 if (cause_b & (1 << qid)) {
1948 /* mask */
1949 al_udma_iofic_mask(
1950 (struct unit_regs __iomem *)adapter->udma_base,
1951 AL_UDMA_IOFIC_LEVEL_PRIMARY,
1952 AL_INT_GROUP_B, 1 << qid);
1953 }
1954 }
1955 }
1956 if ((reg & AL_INT_GROUP_A_GROUP_C_SUM) != 0) {
1957 uint32_t cause_c = al_udma_iofic_read_cause(regs_base,
1958 AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
1959 int qid;
1960 device_printf_dbg(adapter->dev, "secondary C cause %x\n", cause_c);
1961 for (qid = 0; qid < adapter->num_tx_queues; qid++) {
1962 if ((cause_c & (1 << qid)) != 0) {
1963 al_udma_iofic_mask(
1964 (struct unit_regs __iomem *)adapter->udma_base,
1965 AL_UDMA_IOFIC_LEVEL_PRIMARY,
1966 AL_INT_GROUP_C, 1 << qid);
1967 }
1968 }
1969 }
1970
1971 al_eth_tx_cmlp_irq_filter(adapter->tx_ring);
1972
1973 return (0);
1974 }
1975
1976 static int
1977 al_eth_intr_msix_all(void *data)
1978 {
1979 struct al_eth_adapter *adapter = data;
1980
1981 device_printf_dbg(adapter->dev, "%s\n", __func__);
1982 return (0);
1983 }
1984
1985 static int
1986 al_eth_intr_msix_mgmt(void *data)
1987 {
1988 struct al_eth_adapter *adapter = data;
1989
1990 device_printf_dbg(adapter->dev, "%s\n", __func__);
1991 return (0);
1992 }
1993
1994 static int
1995 al_eth_enable_msix(struct al_eth_adapter *adapter)
1996 {
1997 int i, msix_vecs, rc, count;
1998
1999 device_printf_dbg(adapter->dev, "%s\n", __func__);
2000 msix_vecs = 1 + adapter->num_rx_queues + adapter->num_tx_queues;
2001
2002 device_printf_dbg(adapter->dev,
2003 "Try to enable MSIX, vector numbers = %d\n", msix_vecs);
2004
2005 adapter->msix_entries = malloc(msix_vecs*sizeof(*adapter->msix_entries),
2006 M_IFAL, M_ZERO | M_WAITOK);
2007
2008 if (adapter->msix_entries == NULL) {
2009 device_printf_dbg(adapter->dev, "failed to allocate"
2010 " msix_entries %d\n", msix_vecs);
2011 rc = ENOMEM;
2012 goto exit;
2013 }
2014
2015 /* management vector (GROUP_A) @2*/
2016 adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].entry = 2;
2017 adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector = 0;
2018
2019 /* rx queues start @3 */
2020 for (i = 0; i < adapter->num_rx_queues; i++) {
2021 int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);
2022
2023 adapter->msix_entries[irq_idx].entry = 3 + i;
2024 adapter->msix_entries[irq_idx].vector = 0;
2025 }
2026 /* tx queues start @7 */
2027 for (i = 0; i < adapter->num_tx_queues; i++) {
2028 int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);
2029
2030 adapter->msix_entries[irq_idx].entry = 3 +
2031 AL_ETH_MAX_HW_QUEUES + i;
2032 adapter->msix_entries[irq_idx].vector = 0;
2033 }
2034
2035 count = msix_vecs + 2; /* entries start from 2 */
2036 rc = pci_alloc_msix(adapter->dev, &count);
2037
2038 if (rc != 0) {
2039 device_printf_dbg(adapter->dev, "failed to allocate MSIX "
2040 "vectors %d\n", msix_vecs+2);
2041 device_printf_dbg(adapter->dev, "ret = %d\n", rc);
2042 goto msix_entries_exit;
2043 }
2044
2045 if (count != msix_vecs + 2) {
2046 device_printf_dbg(adapter->dev, "failed to allocate all MSIX "
2047 "vectors %d, allocated %d\n", msix_vecs+2, count);
2048 rc = ENOSPC;
2049 goto msix_entries_exit;
2050 }
2051
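/*
 * pci_alloc_msix() hands out 1-based SYS_RES_IRQ rids and the first two
 * MSI-X table entries are left unused, so vector i maps to rid 2 + 1 + i.
 */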
2052 for (i = 0; i < msix_vecs; i++)
2053 adapter->msix_entries[i].vector = 2 + 1 + i;
2054
2055 device_printf_dbg(adapter->dev, "successfully enabled MSIX,"
2056 " vectors %d\n", msix_vecs);
2057
2058 adapter->msix_vecs = msix_vecs;
2059 adapter->flags |= AL_ETH_FLAG_MSIX_ENABLED;
2060 goto exit;
2061
2062 msix_entries_exit:
2063 adapter->msix_vecs = 0;
2064 free(adapter->msix_entries, M_IFAL);
2065 adapter->msix_entries = NULL;
2066
2067 exit:
2068 return (rc);
2069 }
2070
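/*
 * Fill irq_tbl with a name, filter handler, data pointer and vector for
 * each interrupt source, depending on whether the device ended up in
 * legacy INTx, single MSI-X or per-queue MSI-X mode.
 */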
2071 static int
2072 al_eth_setup_int_mode(struct al_eth_adapter *adapter)
2073 {
2074 int i, rc;
2075
2076 rc = al_eth_enable_msix(adapter);
2077 if (rc != 0) {
2078 device_printf(adapter->dev, "Failed to enable MSIX mode.\n");
2079 return (rc);
2080 }
2081
2082 adapter->irq_vecs = max(1, adapter->msix_vecs);
2083 /* single INTX mode */
2084 if (adapter->msix_vecs == 0) {
2085 snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name,
2086 AL_ETH_IRQNAME_SIZE, "al-eth-intx-all@pci:%s",
2087 device_get_name(adapter->dev));
2088 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler =
2089 al_eth_intr_intx_all;
2090 /* IRQ vector will be resolved from device resources */
2091 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = 0;
2092 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2093
2094 device_printf(adapter->dev, "%s and vector %d \n", __func__,
2095 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector);
2096
2097 return (0);
2098 }
2099 /* single MSI-X mode */
2100 if (adapter->msix_vecs == 1) {
2101 snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name,
2102 AL_ETH_IRQNAME_SIZE, "al-eth-msix-all@pci:%s",
2103 device_get_name(adapter->dev));
2104 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler =
2105 al_eth_intr_msix_all;
2106 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector =
2107 adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector;
2108 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2109
2110 return (0);
2111 }
2112 /* MSI-X per queue */
2113 snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE,
2114 "al-eth-msix-mgmt@pci:%s", device_get_name(adapter->dev));
2115 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_msix_mgmt;
2116
2117 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2118 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector =
2119 adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector;
2120
2121 for (i = 0; i < adapter->num_rx_queues; i++) {
2122 int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);
2123
2124 snprintf(adapter->irq_tbl[irq_idx].name, AL_ETH_IRQNAME_SIZE,
2125 "al-eth-rx-comp-%d@pci:%s", i,
2126 device_get_name(adapter->dev));
2127 adapter->irq_tbl[irq_idx].handler = al_eth_rx_recv_irq_filter;
2128 adapter->irq_tbl[irq_idx].data = &adapter->rx_ring[i];
2129 adapter->irq_tbl[irq_idx].vector =
2130 adapter->msix_entries[irq_idx].vector;
2131 }
2132
2133 for (i = 0; i < adapter->num_tx_queues; i++) {
2134 int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);
2135
2136 snprintf(adapter->irq_tbl[irq_idx].name,
2137 AL_ETH_IRQNAME_SIZE, "al-eth-tx-comp-%d@pci:%s", i,
2138 device_get_name(adapter->dev));
2139 adapter->irq_tbl[irq_idx].handler = al_eth_tx_cmlp_irq_filter;
2140 adapter->irq_tbl[irq_idx].data = &adapter->tx_ring[i];
2141 adapter->irq_tbl[irq_idx].vector =
2142 adapter->msix_entries[irq_idx].vector;
2143 }
2144
2145 return (0);
2146 }
2147
2148 static void
2149 __al_eth_free_irq(struct al_eth_adapter *adapter)
2150 {
2151 struct al_eth_irq *irq;
2152 int i, rc;
2153
2154 for (i = 0; i < adapter->irq_vecs; i++) {
2155 irq = &adapter->irq_tbl[i];
2156 if (irq->requested != 0) {
2157 device_printf_dbg(adapter->dev, "tear down irq: %d\n",
2158 irq->vector);
2159 rc = bus_teardown_intr(adapter->dev, irq->res,
2160 irq->cookie);
2161 if (rc != 0)
2162 device_printf(adapter->dev, "failed to tear "
2163 "down irq: %d\n", irq->vector);
2164 }
2165 irq->requested = 0;
2166 }
2167 }
2168
2169 static void
2170 al_eth_free_irq(struct al_eth_adapter *adapter)
2171 {
2172 struct al_eth_irq *irq;
2173 int i, rc;
2174 #ifdef CONFIG_RFS_ACCEL
2175 if (adapter->msix_vecs >= 1) {
2176 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
2177 adapter->netdev->rx_cpu_rmap = NULL;
2178 }
2179 #endif
2180
2181 __al_eth_free_irq(adapter);
2182
2183 for (i = 0; i < adapter->irq_vecs; i++) {
2184 irq = &adapter->irq_tbl[i];
2185 if (irq->res == NULL)
2186 continue;
2187 device_printf_dbg(adapter->dev, "release resource irq: %d\n",
2188 irq->vector);
2189 rc = bus_release_resource(adapter->dev, SYS_RES_IRQ, irq->vector,
2190 irq->res);
2191 irq->res = NULL;
2192 if (rc != 0)
2193 device_printf(adapter->dev, "dev has no parent while "
2194 "releasing res for irq: %d\n", irq->vector);
2195 }
2196
2197 pci_release_msi(adapter->dev);
2198
2199 adapter->flags &= ~AL_ETH_FLAG_MSIX_ENABLED;
2200
2201 adapter->msix_vecs = 0;
2202 free(adapter->msix_entries, M_IFAL);
2203 adapter->msix_entries = NULL;
2204 }
2205
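/*
 * Allocate a SYS_RES_IRQ resource and install the filter handler for every
 * entry in irq_tbl; interrupts are shareable only in the legacy INTx case.
 * On failure the already configured entries are torn down and released.
 */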
2206 static int
2207 al_eth_request_irq(struct al_eth_adapter *adapter)
2208 {
2209 unsigned long flags;
2210 struct al_eth_irq *irq;
2211 int rc = 0, i, v;
2212
2213 if ((adapter->flags & AL_ETH_FLAG_MSIX_ENABLED) != 0)
2214 flags = RF_ACTIVE;
2215 else
2216 flags = RF_ACTIVE | RF_SHAREABLE;
2217
2218 for (i = 0; i < adapter->irq_vecs; i++) {
2219 irq = &adapter->irq_tbl[i];
2220
2221 if (irq->requested != 0)
2222 continue;
2223
2224 irq->res = bus_alloc_resource_any(adapter->dev, SYS_RES_IRQ,
2225 &irq->vector, flags);
2226 if (irq->res == NULL) {
2227 device_printf(adapter->dev, "could not allocate "
2228 "irq vector=%d\n", irq->vector);
2229 rc = ENXIO;
2230 goto exit_res;
2231 }
2232
2233 if ((rc = bus_setup_intr(adapter->dev, irq->res,
2234 INTR_TYPE_NET | INTR_MPSAFE, irq->handler,
2235 NULL, irq->data, &irq->cookie)) != 0) {
2236 device_printf(adapter->dev, "failed to register "
2237 "interrupt handler for irq %ju: %d\n",
2238 (uintmax_t)rman_get_start(irq->res), rc);
2239 goto exit_intr;
2240 }
2241 irq->requested = 1;
2242 }
2243 goto exit;
2244
2245 exit_intr:
2246 v = i - 1; /* -1 because we omit the operation that failed */
2247 while (v-- >= 0) {
2248 int bti;
2249 irq = &adapter->irq_tbl[v];
2250 bti = bus_teardown_intr(adapter->dev, irq->res, irq->cookie);
2251 if (bti != 0) {
2252 device_printf(adapter->dev, "failed to tear "
2253 "down irq: %d\n", irq->vector);
2254 }
2255
2256 irq->requested = 0;
2257 device_printf_dbg(adapter->dev, "exit_intr: releasing irq %d\n",
2258 irq->vector);
2259 }
2260
2261 exit_res:
2262 v = i - 1; /* -1 because we omit the operation that failed */
2263 while (v-- >= 0) {
2264 int brr;
2265 irq = &adapter->irq_tbl[v];
2266 device_printf_dbg(adapter->dev, "exit_res: releasing resource"
2267 " for irq %d\n", irq->vector);
2268 brr = bus_release_resource(adapter->dev, SYS_RES_IRQ,
2269 irq->vector, irq->res);
2270 if (brr != 0)
2271 device_printf(adapter->dev, "dev has no parent while "
2272 "releasing res for irq: %d\n", irq->vector);
2273 irq->res = NULL;
2274 }
2275
2276 exit:
2277 return (rc);
2278 }
2279
2280 /**
2281 * al_eth_setup_tx_resources - allocate Tx resources (Descriptors)
2282 * @adapter: network interface device structure
2283 * @qid: queue index
2284 *
2285 * Return 0 on success, negative on failure
2286 **/
2287 static int
2288 al_eth_setup_tx_resources(struct al_eth_adapter *adapter, int qid)
2289 {
2290 struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
2291 device_t dev = tx_ring->dev;
2292 struct al_udma_q_params *q_params = &tx_ring->q_params;
2293 int size;
2294 int ret;
2295
2296 if (adapter->up)
2297 return (0);
2298
2299 size = sizeof(struct al_eth_tx_buffer) * tx_ring->sw_count;
2300
2301 tx_ring->tx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
2302 if (tx_ring->tx_buffer_info == NULL)
2303 return (ENOMEM);
2304
2305 tx_ring->descs_size = tx_ring->hw_count * sizeof(union al_udma_desc);
2306 q_params->size = tx_ring->hw_count;
2307
2308 ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
2309 (bus_dmamap_t *)&q_params->desc_phy_base_map,
2310 (bus_addr_t *)&q_params->desc_phy_base,
2311 (void**)&q_params->desc_base, tx_ring->descs_size);
2312 if (ret != 0) {
2313 device_printf(dev, "failed to al_dma_alloc_coherent,"
2314 " ret = %d\n", ret);
2315 return (ENOMEM);
2316 }
2317
2318 if (q_params->desc_base == NULL)
2319 return (ENOMEM);
2320
2321 device_printf_dbg(dev, "Initializing ring queues %d\n", qid);
2322
2323 /* Allocate Ring Queue */
2324 mtx_init(&tx_ring->br_mtx, "AlRingMtx", NULL, MTX_DEF);
2325 tx_ring->br = buf_ring_alloc(AL_BR_SIZE, M_DEVBUF, M_WAITOK,
2326 &tx_ring->br_mtx);
2327 if (tx_ring->br == NULL) {
2328 device_printf(dev, "Critical Failure setting up buf ring\n");
2329 return (ENOMEM);
2330 }
2331
2332 /* Allocate taskqueues */
2333 TASK_INIT(&tx_ring->enqueue_task, 0, al_eth_start_xmit, tx_ring);
2334 tx_ring->enqueue_tq = taskqueue_create_fast("al_tx_enque", M_NOWAIT,
2335 taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
2336 taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET, "%s txeq",
2337 device_get_nameunit(adapter->dev));
2338 TASK_INIT(&tx_ring->cmpl_task, 0, al_eth_tx_cmpl_work, tx_ring);
2339 tx_ring->cmpl_tq = taskqueue_create_fast("al_tx_cmpl", M_NOWAIT,
2340 taskqueue_thread_enqueue, &tx_ring->cmpl_tq);
2341 taskqueue_start_threads(&tx_ring->cmpl_tq, 1, PI_REALTIME, "%s txcq",
2342 device_get_nameunit(adapter->dev));
2343
2344 /* Setup DMA descriptor areas. */
2345 ret = bus_dma_tag_create(bus_get_dma_tag(dev),
2346 1, 0, /* alignment, bounds */
2347 BUS_SPACE_MAXADDR, /* lowaddr */
2348 BUS_SPACE_MAXADDR, /* highaddr */
2349 NULL, NULL, /* filter, filterarg */
2350 AL_TSO_SIZE, /* maxsize */
2351 AL_ETH_PKT_MAX_BUFS, /* nsegments */
2352 PAGE_SIZE, /* maxsegsize */
2353 0, /* flags */
2354 NULL, /* lockfunc */
2355 NULL, /* lockfuncarg */
2356 &tx_ring->dma_buf_tag);
2357
2358 if (ret != 0) {
2359 device_printf(dev,"Unable to allocate dma_buf_tag, ret = %d\n",
2360 ret);
2361 return (ret);
2362 }
2363
2364 for (size = 0; size < tx_ring->sw_count; size++) {
2365 ret = bus_dmamap_create(tx_ring->dma_buf_tag, 0,
2366 &tx_ring->tx_buffer_info[size].dma_map);
2367 if (ret != 0) {
2368 device_printf(dev, "Unable to map DMA TX "
2369 "buffer memory [iter=%d]\n", size);
2370 return (ret);
2371 }
2372 }
2373
2374 /* completion queue not used for tx */
2375 q_params->cdesc_base = NULL;
2376 /* size in bytes of the udma completion ring descriptor */
2377 q_params->cdesc_size = 8;
2378 tx_ring->next_to_use = 0;
2379 tx_ring->next_to_clean = 0;
2380
2381 return (0);
2382 }
2383
2384 /*
2385 * al_eth_free_tx_resources - Free Tx Resources per Queue
2386 * @adapter: network interface device structure
2387 * @qid: queue index
2388 *
2389 * Free all transmit software resources
2390 */
2391 static void
2392 al_eth_free_tx_resources(struct al_eth_adapter *adapter, int qid)
2393 {
2394 struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
2395 struct al_udma_q_params *q_params = &tx_ring->q_params;
2396 int size;
2397
2398 /* At this point interrupts' handlers must be deactivated */
2399 while (taskqueue_cancel(tx_ring->cmpl_tq, &tx_ring->cmpl_task, NULL))
2400 taskqueue_drain(tx_ring->cmpl_tq, &tx_ring->cmpl_task);
2401
2402 taskqueue_free(tx_ring->cmpl_tq);
2403 while (taskqueue_cancel(tx_ring->enqueue_tq,
2404 &tx_ring->enqueue_task, NULL)) {
2405 taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
2406 }
2407
2408 taskqueue_free(tx_ring->enqueue_tq);
2409
2410 if (tx_ring->br != NULL) {
2411 drbr_flush(adapter->netdev, tx_ring->br);
2412 buf_ring_free(tx_ring->br, M_DEVBUF);
2413 }
2414
2415 for (size = 0; size < tx_ring->sw_count; size++) {
2416 m_freem(tx_ring->tx_buffer_info[size].m);
2417 tx_ring->tx_buffer_info[size].m = NULL;
2418
2419 bus_dmamap_unload(tx_ring->dma_buf_tag,
2420 tx_ring->tx_buffer_info[size].dma_map);
2421 bus_dmamap_destroy(tx_ring->dma_buf_tag,
2422 tx_ring->tx_buffer_info[size].dma_map);
2423 }
2424 bus_dma_tag_destroy(tx_ring->dma_buf_tag);
2425
2426 free(tx_ring->tx_buffer_info, M_IFAL);
2427 tx_ring->tx_buffer_info = NULL;
2428
2429 mtx_destroy(&tx_ring->br_mtx);
2430
2431 /* if not set, then don't free */
2432 if (q_params->desc_base == NULL)
2433 return;
2434
2435 al_dma_free_coherent(q_params->desc_phy_base_tag,
2436 q_params->desc_phy_base_map, q_params->desc_base);
2437
2438 q_params->desc_base = NULL;
2439 }
2440
2441 /*
2442 * al_eth_free_all_tx_resources - Free Tx Resources for All Queues
2443 * @adapter: board private structure
2444 *
2445 * Free all transmit software resources
2446 */
2447 static void
2448 al_eth_free_all_tx_resources(struct al_eth_adapter *adapter)
2449 {
2450 int i;
2451
2452 for (i = 0; i < adapter->num_tx_queues; i++)
2453 if (adapter->tx_ring[i].q_params.desc_base)
2454 al_eth_free_tx_resources(adapter, i);
2455 }
2456
2457 /*
2458 * al_eth_setup_rx_resources - allocate Rx resources (Descriptors)
2459 * @adapter: network interface device structure
2460 * @qid: queue index
2461 *
2462 * Returns 0 on success, negative on failure
2463 */
2464 static int
2465 al_eth_setup_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
2466 {
2467 struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
2468 device_t dev = rx_ring->dev;
2469 struct al_udma_q_params *q_params = &rx_ring->q_params;
2470 int size;
2471 int ret;
2472
2473 size = sizeof(struct al_eth_rx_buffer) * rx_ring->sw_count;
2474
2475 /* alloc extra element so in rx path we can always prefetch rx_info + 1 */
2476 size += 1;
2477
2478 rx_ring->rx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
2479 if (rx_ring->rx_buffer_info == NULL)
2480 return (ENOMEM);
2481
2482 rx_ring->descs_size = rx_ring->hw_count * sizeof(union al_udma_desc);
2483 q_params->size = rx_ring->hw_count;
2484
2485 ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
2486 &q_params->desc_phy_base_map,
2487 (bus_addr_t *)&q_params->desc_phy_base,
2488 (void**)&q_params->desc_base, rx_ring->descs_size);
2489
2490 if ((q_params->desc_base == NULL) || (ret != 0))
2491 return (ENOMEM);
2492
2493 /* size in bytes of the udma completion ring descriptor */
2494 q_params->cdesc_size = 16;
2495 rx_ring->cdescs_size = rx_ring->hw_count * q_params->cdesc_size;
2496 ret = al_dma_alloc_coherent(dev, &q_params->cdesc_phy_base_tag,
2497 &q_params->cdesc_phy_base_map,
2498 (bus_addr_t *)&q_params->cdesc_phy_base,
2499 (void**)&q_params->cdesc_base, rx_ring->cdescs_size);
2500
2501 if ((q_params->cdesc_base == NULL) || (ret != 0))
2502 return (ENOMEM);
2503
2504 /* Allocate taskqueues */
2505 NET_TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring);
2506 rx_ring->enqueue_tq = taskqueue_create_fast("al_rx_enque", M_NOWAIT,
2507 taskqueue_thread_enqueue, &rx_ring->enqueue_tq);
2508 taskqueue_start_threads(&rx_ring->enqueue_tq, 1, PI_NET, "%s rxeq",
2509 device_get_nameunit(adapter->dev));
2510
2511 /* Setup DMA descriptor areas. */
2512 ret = bus_dma_tag_create(bus_get_dma_tag(dev),
2513 1, 0, /* alignment, bounds */
2514 BUS_SPACE_MAXADDR, /* lowaddr */
2515 BUS_SPACE_MAXADDR, /* highaddr */
2516 NULL, NULL, /* filter, filterarg */
2517 AL_TSO_SIZE, /* maxsize */
2518 1, /* nsegments */
2519 AL_TSO_SIZE, /* maxsegsize */
2520 0, /* flags */
2521 NULL, /* lockfunc */
2522 NULL, /* lockfuncarg */
2523 &rx_ring->dma_buf_tag);
2524
2525 if (ret != 0) {
2526 device_printf(dev,"Unable to allocate RX dma_buf_tag\n");
2527 return (ret);
2528 }
2529
2530 for (size = 0; size < rx_ring->sw_count; size++) {
2531 ret = bus_dmamap_create(rx_ring->dma_buf_tag, 0,
2532 &rx_ring->rx_buffer_info[size].dma_map);
2533 if (ret != 0) {
2534 device_printf(dev,"Unable to map DMA RX buffer memory\n");
2535 return (ret);
2536 }
2537 }
2538
2539 /* Zero out the descriptor ring */
2540 memset(q_params->cdesc_base, 0, rx_ring->cdescs_size);
2541
2542 /* Create LRO for the ring */
2543 if ((if_getcapenable(adapter->netdev) & IFCAP_LRO) != 0) {
2544 int err = tcp_lro_init(&rx_ring->lro);
2545 if (err != 0) {
2546 device_printf(adapter->dev,
2547 "LRO[%d] Initialization failed!\n", qid);
2548 } else {
2549 device_printf_dbg(adapter->dev,
2550 "RX Soft LRO[%d] Initialized\n", qid);
2551 rx_ring->lro_enabled = true;
2552 rx_ring->lro.ifp = adapter->netdev;
2553 }
2554 }
2555
2556 rx_ring->next_to_clean = 0;
2557 rx_ring->next_to_use = 0;
2558
2559 return (0);
2560 }
2561
2562 /*
2563 * al_eth_free_rx_resources - Free Rx Resources
2564 * @adapter: network interface device structure
2565 * @qid: queue index
2566 *
2567 * Free all receive software resources
2568 */
2569 static void
2570 al_eth_free_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
2571 {
2572 struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
2573 struct al_udma_q_params *q_params = &rx_ring->q_params;
2574 int size;
2575
2576 /* At this point interrupts' handlers must be deactivated */
2577 while (taskqueue_cancel(rx_ring->enqueue_tq,
2578 &rx_ring->enqueue_task, NULL)) {
2579 taskqueue_drain(rx_ring->enqueue_tq, &rx_ring->enqueue_task);
2580 }
2581
2582 taskqueue_free(rx_ring->enqueue_tq);
2583
2584 for (size = 0; size < rx_ring->sw_count; size++) {
2585 m_freem(rx_ring->rx_buffer_info[size].m);
2586 rx_ring->rx_buffer_info[size].m = NULL;
2587 bus_dmamap_unload(rx_ring->dma_buf_tag,
2588 rx_ring->rx_buffer_info[size].dma_map);
2589 bus_dmamap_destroy(rx_ring->dma_buf_tag,
2590 rx_ring->rx_buffer_info[size].dma_map);
2591 }
2592 bus_dma_tag_destroy(rx_ring->dma_buf_tag);
2593
2594 free(rx_ring->rx_buffer_info, M_IFAL);
2595 rx_ring->rx_buffer_info = NULL;
2596
2597 /* if not set, then don't free */
2598 if (q_params->desc_base == NULL)
2599 return;
2600
2601 al_dma_free_coherent(q_params->desc_phy_base_tag,
2602 q_params->desc_phy_base_map, q_params->desc_base);
2603
2604 q_params->desc_base = NULL;
2605
2606 /* if not set, then don't free */
2607 if (q_params->cdesc_base == NULL)
2608 return;
2609
2610 al_dma_free_coherent(q_params->cdesc_phy_base_tag,
2611 q_params->cdesc_phy_base_map, q_params->cdesc_base);
2612
2613 q_params->cdesc_phy_base = 0;
2614
2615 /* Free LRO resources */
2616 tcp_lro_free(&rx_ring->lro);
2617 }
2618
2619 /*
2620 * al_eth_free_all_rx_resources - Free Rx Resources for All Queues
2621 * @adapter: board private structure
2622 *
2623 * Free all receive software resources
2624 */
2625 static void
2626 al_eth_free_all_rx_resources(struct al_eth_adapter *adapter)
2627 {
2628 int i;
2629
2630 for (i = 0; i < adapter->num_rx_queues; i++)
2631 if (adapter->rx_ring[i].q_params.desc_base != 0)
2632 al_eth_free_rx_resources(adapter, i);
2633 }
2634
2635 /*
2636 * al_eth_setup_all_rx_resources - allocate all queues Rx resources
2637 * @adapter: board private structure
2638 *
2639 * Return 0 on success, negative on failure
2640 */
2641 static int
2642 al_eth_setup_all_rx_resources(struct al_eth_adapter *adapter)
2643 {
2644 int i, rc = 0;
2645
2646 for (i = 0; i < adapter->num_rx_queues; i++) {
2647 rc = al_eth_setup_rx_resources(adapter, i);
2648 if (rc == 0)
2649 continue;
2650
2651 device_printf(adapter->dev, "Allocation for Rx Queue %u failed\n", i);
2652 goto err_setup_rx;
2653 }
2654 return (0);
2655
2656 err_setup_rx:
2657 /* rewind the index freeing the rings as we go */
2658 while (i--)
2659 al_eth_free_rx_resources(adapter, i);
2660 return (rc);
2661 }
2662
2663 /*
2664 * al_eth_setup_all_tx_resources - allocate all queues Tx resources
2665 * @adapter: private structure
2666 *
2667 * Return 0 on success, negative on failure
2668 */
2669 static int
2670 al_eth_setup_all_tx_resources(struct al_eth_adapter *adapter)
2671 {
2672 int i, rc = 0;
2673
2674 for (i = 0; i < adapter->num_tx_queues; i++) {
2675 rc = al_eth_setup_tx_resources(adapter, i);
2676 if (rc == 0)
2677 continue;
2678
2679 device_printf(adapter->dev,
2680 "Allocation for Tx Queue %u failed\n", i);
2681 goto err_setup_tx;
2682 }
2683
2684 return (0);
2685
2686 err_setup_tx:
2687 /* rewind the index freeing the rings as we go */
2688 while (i--)
2689 al_eth_free_tx_resources(adapter, i);
2690
2691 return (rc);
2692 }
2693
2694 static void
2695 al_eth_disable_int_sync(struct al_eth_adapter *adapter)
2696 {
2697
2698 /* disable forwarding interrupts from eth through pci end point */
2699 if ((adapter->board_type == ALPINE_FPGA_NIC) ||
2700 (adapter->board_type == ALPINE_NIC)) {
2701 al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base +
2702 AL_REG_OFFSET_FORWARD_INTR, AL_DIS_FORWARD_INTR);
2703 }
2704
2705 /* mask hw interrupts */
2706 al_eth_interrupts_mask(adapter);
2707 }
2708
2709 static void
2710 al_eth_interrupts_unmask(struct al_eth_adapter *adapter)
2711 {
2712 uint32_t group_a_mask = AL_INT_GROUP_A_GROUP_D_SUM; /* enable group D summary */
2713 uint32_t group_b_mask = (1 << adapter->num_rx_queues) - 1;/* bit per Rx q*/
2714 uint32_t group_c_mask = (1 << adapter->num_tx_queues) - 1;/* bit per Tx q*/
2715 uint32_t group_d_mask = 3 << 8;
2716 struct unit_regs __iomem *regs_base =
2717 (struct unit_regs __iomem *)adapter->udma_base;
2718
2719 if (adapter->int_mode == AL_IOFIC_MODE_LEGACY)
2720 group_a_mask |= AL_INT_GROUP_A_GROUP_B_SUM |
2721 AL_INT_GROUP_A_GROUP_C_SUM |
2722 AL_INT_GROUP_A_GROUP_D_SUM;
2723
2724 al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2725 AL_INT_GROUP_A, group_a_mask);
2726 al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2727 AL_INT_GROUP_B, group_b_mask);
2728 al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2729 AL_INT_GROUP_C, group_c_mask);
2730 al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2731 AL_INT_GROUP_D, group_d_mask);
2732 }
2733
2734 static void
2735 al_eth_interrupts_mask(struct al_eth_adapter *adapter)
2736 {
2737 struct unit_regs __iomem *regs_base =
2738 (struct unit_regs __iomem *)adapter->udma_base;
2739
2740 /* mask all interrupts */
2741 al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2742 AL_INT_GROUP_A, AL_MASK_GROUP_A_INT);
2743 al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2744 AL_INT_GROUP_B, AL_MASK_GROUP_B_INT);
2745 al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2746 AL_INT_GROUP_C, AL_MASK_GROUP_C_INT);
2747 al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2748 AL_INT_GROUP_D, AL_MASK_GROUP_D_INT);
2749 }
2750
2751 static int
2752 al_eth_configure_int_mode(struct al_eth_adapter *adapter)
2753 {
2754 enum al_iofic_mode int_mode;
2755 uint32_t m2s_errors_disable = AL_M2S_MASK_INIT;
2756 uint32_t m2s_aborts_disable = AL_M2S_MASK_INIT;
2757 uint32_t s2m_errors_disable = AL_S2M_MASK_INIT;
2758 uint32_t s2m_aborts_disable = AL_S2M_MASK_INIT;
2759
2760 /* single INTX mode */
2761 if (adapter->msix_vecs == 0)
2762 int_mode = AL_IOFIC_MODE_LEGACY;
2763 else if (adapter->msix_vecs > 1)
2764 int_mode = AL_IOFIC_MODE_MSIX_PER_Q;
2765 else {
2766 device_printf(adapter->dev,
2767 "udma doesn't support single MSI-X mode yet.\n");
2768 return (EIO);
2769 }
2770
2771 if (adapter->board_type != ALPINE_INTEGRATED) {
2772 m2s_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
2773 m2s_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
2774 s2m_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
2775 s2m_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
2776 }
2777
2778 if (al_udma_iofic_config((struct unit_regs __iomem *)adapter->udma_base,
2779 int_mode, m2s_errors_disable, m2s_aborts_disable,
2780 s2m_errors_disable, s2m_aborts_disable)) {
2781 device_printf(adapter->dev,
2782 "al_udma_unit_int_config failed!.\n");
2783 return (EIO);
2784 }
2785 adapter->int_mode = int_mode;
2786 device_printf_dbg(adapter->dev, "using %s interrupt mode\n",
2787 int_mode == AL_IOFIC_MODE_LEGACY ? "INTx" :
2788 int_mode == AL_IOFIC_MODE_MSIX_PER_Q ? "MSI-X per Queue" : "Unknown");
2789 /* set interrupt moderation resolution to 15us */
2790 al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_B, 15);
2791 al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_C, 15);
2792 /* by default interrupt coalescing is disabled */
2793 adapter->tx_usecs = 0;
2794 adapter->rx_usecs = 0;
2795
2796 return (0);
2797 }
2798
2799 /*
2800 * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
2801 * @index: Index in RX flow hash indirection table
2802 * @n_rx_rings: Number of RX rings to use
2803 *
2804 * This function provides the default policy for RX flow hash indirection.
2805 */
2806 static inline uint32_t
2807 ethtool_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
2808 {
2809
2810 return (index % n_rx_rings);
2811 }
2812
2813 static void*
2814 al_eth_update_stats(struct al_eth_adapter *adapter)
2815 {
2816 struct al_eth_mac_stats *mac_stats = &adapter->mac_stats;
2817
2818 if (adapter->up == 0)
2819 return (NULL);
2820
2821 al_eth_mac_stats_get(&adapter->hal_adapter, mac_stats);
2822
2823 return (NULL);
2824 }
2825
2826 static uint64_t
2827 al_get_counter(if_t ifp, ift_counter cnt)
2828 {
2829 struct al_eth_adapter *adapter;
2830 struct al_eth_mac_stats *mac_stats;
2831 uint64_t rv;
2832
2833 adapter = if_getsoftc(ifp);
2834 mac_stats = &adapter->mac_stats;
2835
2836 switch (cnt) {
2837 case IFCOUNTER_IPACKETS:
2838 return (mac_stats->aFramesReceivedOK); /* including pause frames */
2839 case IFCOUNTER_OPACKETS:
2840 return (mac_stats->aFramesTransmittedOK);
2841 case IFCOUNTER_IBYTES:
2842 return (mac_stats->aOctetsReceivedOK);
2843 case IFCOUNTER_OBYTES:
2844 return (mac_stats->aOctetsTransmittedOK);
2845 case IFCOUNTER_IMCASTS:
2846 return (mac_stats->ifInMulticastPkts);
2847 case IFCOUNTER_OMCASTS:
2848 return (mac_stats->ifOutMulticastPkts);
2849 case IFCOUNTER_COLLISIONS:
2850 return (0);
2851 case IFCOUNTER_IQDROPS:
2852 return (mac_stats->etherStatsDropEvents);
2853 case IFCOUNTER_IERRORS:
2854 rv = mac_stats->ifInErrors +
2855 mac_stats->etherStatsUndersizePkts + /* good but short */
2856 mac_stats->etherStatsFragments + /* short and bad*/
2857 mac_stats->etherStatsJabbers + /* with crc errors */
2858 mac_stats->etherStatsOversizePkts +
2859 mac_stats->aFrameCheckSequenceErrors +
2860 mac_stats->aAlignmentErrors;
2861 return (rv);
2862 case IFCOUNTER_OERRORS:
2863 return (mac_stats->ifOutErrors);
2864 default:
2865 return (if_get_counter_default(ifp, cnt));
2866 }
2867 }
2868
2869 static u_int
2870 al_count_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2871 {
2872 unsigned char *mac;
2873
2874 mac = LLADDR(sdl);
2875 /* default mc address inside mac address */
2876 if (mac[3] != 0 && mac[4] != 0 && mac[5] != 1)
2877 return (1);
2878 else
2879 return (0);
2880 }
2881
2882 static u_int
2883 al_program_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2884 {
2885 struct al_eth_adapter *adapter = arg;
2886
2887 al_eth_mac_table_unicast_add(adapter,
2888 AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1 + cnt, 1);
2889
2890 return (1);
2891 }
2892
2893 /*
2894 * Unicast, Multicast and Promiscuous mode set
2895 *
2896 * The set_rx_mode entry point is called whenever the unicast or multicast
2897 * address lists or the network interface flags are updated. This routine is
2898 * responsible for configuring the hardware for proper unicast, multicast,
2899 * promiscuous mode, and all-multi behavior.
2900 */
2901 static void
2902 al_eth_set_rx_mode(struct al_eth_adapter *adapter)
2903 {
2904 if_t ifp = adapter->netdev;
2905 int mc, uc;
2906 uint8_t i;
2907
2908 /* XXXGL: why generic count won't work? */
2909 mc = if_foreach_llmaddr(ifp, al_count_maddr, NULL);
2910 uc = if_lladdr_count(ifp);
2911
2912 if ((if_getflags(ifp) & IFF_PROMISC) != 0) {
2913 al_eth_mac_table_promiscuous_set(adapter, true);
2914 } else {
2915 if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
2916 /* This interface is in all-multicast mode (used by multicast routers). */
2917 al_eth_mac_table_all_multicast_add(adapter,
2918 AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
2919 } else {
2920 if (mc == 0) {
2921 al_eth_mac_table_entry_clear(adapter,
2922 AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX);
2923 } else {
2924 al_eth_mac_table_all_multicast_add(adapter,
2925 AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
2926 }
2927 }
2928 if (uc != 0) {
2929 i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1;
2930 if (uc > AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT) {
2931 /*
2932 * In this case there are more addresses than
2933 * entries in the mac table - set promiscuous
2934 */
2935 al_eth_mac_table_promiscuous_set(adapter, true);
2936 return;
2937 }
2938
2939 /* clear the last configuration */
2940 while (i < (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE +
2941 AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)) {
2942 al_eth_mac_table_entry_clear(adapter, i);
2943 i++;
2944 }
2945
2946 /* set new addresses */
2947 if_foreach_lladdr(ifp, al_program_addr, adapter);
2948 }
2949 al_eth_mac_table_promiscuous_set(adapter, false);
2950 }
2951 }
2952
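/*
 * Program the Rx forwarding path: pbits/priority tables, the default
 * control-table entry, default MAC-table filtering (our unicast address
 * plus broadcast), a random Toeplitz hash key and the RSS indirection
 * table spread across the available Rx queues.
 */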
2953 static void
2954 al_eth_config_rx_fwd(struct al_eth_adapter *adapter)
2955 {
2956 struct al_eth_fwd_ctrl_table_entry entry;
2957 int i;
2958
2959 /* let priority be equal to pbits */
2960 for (i = 0; i < AL_ETH_FWD_PBITS_TABLE_NUM; i++)
2961 al_eth_fwd_pbits_table_set(&adapter->hal_adapter, i, i);
2962
2963 /* map priority to queue index, queue id = priority/2 */
2964 for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
2965 al_eth_fwd_priority_table_set(&adapter->hal_adapter, i, i >> 1);
2966
2967 entry.prio_sel = AL_ETH_CTRL_TABLE_PRIO_SEL_VAL_0;
2968 entry.queue_sel_1 = AL_ETH_CTRL_TABLE_QUEUE_SEL_1_THASH_TABLE;
2969 entry.queue_sel_2 = AL_ETH_CTRL_TABLE_QUEUE_SEL_2_NO_PRIO;
2970 entry.udma_sel = AL_ETH_CTRL_TABLE_UDMA_SEL_MAC_TABLE;
2971 entry.filter = false;
2972
2973 al_eth_ctrl_table_def_set(&adapter->hal_adapter, AL_FALSE, &entry);
2974
2975 /*
2976 * By default set the mac table to forward all unicast packets to our
2977 * MAC address and all broadcast packets; all the rest will be dropped.
2978 */
2979 al_eth_mac_table_unicast_add(adapter, AL_ETH_MAC_TABLE_UNICAST_IDX_BASE,
2980 1);
2981 al_eth_mac_table_broadcast_add(adapter, AL_ETH_MAC_TABLE_BROADCAST_IDX, 1);
2982 al_eth_mac_table_promiscuous_set(adapter, false);
2983
2984 /* set toeplitz hash keys */
2985 for (i = 0; i < sizeof(adapter->toeplitz_hash_key); i++)
2986 *((uint8_t*)adapter->toeplitz_hash_key + i) = (uint8_t)random();
2987
2988 for (i = 0; i < AL_ETH_RX_HASH_KEY_NUM; i++)
2989 al_eth_hash_key_set(&adapter->hal_adapter, i,
2990 htonl(adapter->toeplitz_hash_key[i]));
2991
2992 for (i = 0; i < AL_ETH_RX_RSS_TABLE_SIZE; i++) {
2993 adapter->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i,
2994 AL_ETH_NUM_QUEUES);
2995 al_eth_set_thash_table_entry(adapter, i, 0,
2996 adapter->rss_ind_tbl[i]);
2997 }
2998
2999 al_eth_fsm_table_init(adapter);
3000 }
3001
3002 static void
3003 al_eth_req_rx_buff_size(struct al_eth_adapter *adapter, int size)
3004 {
3005
3006 /*
3007 * Determine the correct mbuf pool
3008 * for doing jumbo frames.
3009 * Try from the smallest size up to the maximum supported.
3010 */
3011 adapter->rx_mbuf_sz = MCLBYTES;
3012 if (size > 2048) {
3013 if (adapter->max_rx_buff_alloc_size > 2048)
3014 adapter->rx_mbuf_sz = MJUMPAGESIZE;
3015 else
3016 return;
3017 }
3018 if (size > 4096) {
3019 if (adapter->max_rx_buff_alloc_size > 4096)
3020 adapter->rx_mbuf_sz = MJUM9BYTES;
3021 else
3022 return;
3023 }
3024 if (size > 9216) {
3025 if (adapter->max_rx_buff_alloc_size > 9216)
3026 adapter->rx_mbuf_sz = MJUM16BYTES;
3027 else
3028 return;
3029 }
3030 }
3031
3032 static int
3033 al_eth_change_mtu(struct al_eth_adapter *adapter, int new_mtu)
3034 {
3035 int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
3036 ETHER_VLAN_ENCAP_LEN;
3037
3038 al_eth_req_rx_buff_size(adapter, new_mtu);
3039
3040 device_printf_dbg(adapter->dev, "set MTU to %d\n", new_mtu);
3041 al_eth_rx_pkt_limit_config(&adapter->hal_adapter,
3042 AL_ETH_MIN_FRAME_LEN, max_frame);
3043
3044 al_eth_tso_mss_config(&adapter->hal_adapter, 0, new_mtu - 100);
3045
3046 return (0);
3047 }
3048
3049 static int
3050 al_eth_check_mtu(struct al_eth_adapter *adapter, int new_mtu)
3051 {
3052 int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
3053
3054 if ((new_mtu < AL_ETH_MIN_FRAME_LEN) ||
3055 (max_frame > AL_ETH_MAX_FRAME_LEN)) {
3056 return (EINVAL);
3057 }
3058
3059 return (0);
3060 }
3061
3062 static int
3063 al_eth_udma_queue_enable(struct al_eth_adapter *adapter, enum al_udma_type type,
3064 int qid)
3065 {
3066 int rc = 0;
3067 char *name = (type == UDMA_TX) ? "Tx" : "Rx";
3068 struct al_udma_q_params *q_params;
3069
3070 if (type == UDMA_TX)
3071 q_params = &adapter->tx_ring[qid].q_params;
3072 else
3073 q_params = &adapter->rx_ring[qid].q_params;
3074
3075 rc = al_eth_queue_config(&adapter->hal_adapter, type, qid, q_params);
3076 if (rc < 0) {
3077 device_printf(adapter->dev, "config %s queue %u failed\n", name,
3078 qid);
3079 return (rc);
3080 }
3081 return (rc);
3082 }
3083
3084 static int
3085 al_eth_udma_queues_enable_all(struct al_eth_adapter *adapter)
3086 {
3087 int i;
3088
3089 for (i = 0; i < adapter->num_tx_queues; i++)
3090 al_eth_udma_queue_enable(adapter, UDMA_TX, i);
3091
3092 for (i = 0; i < adapter->num_rx_queues; i++)
3093 al_eth_udma_queue_enable(adapter, UDMA_RX, i);
3094
3095 return (0);
3096 }
3097
3098 static void
3099 al_eth_up_complete(struct al_eth_adapter *adapter)
3100 {
3101
3102 al_eth_configure_int_mode(adapter);
3103 al_eth_config_rx_fwd(adapter);
3104 al_eth_change_mtu(adapter, if_getmtu(adapter->netdev));
3105 al_eth_udma_queues_enable_all(adapter);
3106 al_eth_refill_all_rx_bufs(adapter);
3107 al_eth_interrupts_unmask(adapter);
3108
3109 /* enable forwarding interrupts from eth through pci end point */
3110 if ((adapter->board_type == ALPINE_FPGA_NIC) ||
3111 (adapter->board_type == ALPINE_NIC)) {
3112 al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base +
3113 AL_REG_OFFSET_FORWARD_INTR, AL_EN_FORWARD_INTR);
3114 }
3115
3116 al_eth_flow_ctrl_enable(adapter);
3117
3118 mtx_lock(&adapter->stats_mtx);
3119 callout_reset(&adapter->stats_callout, hz, al_tick_stats, (void*)adapter);
3120 mtx_unlock(&adapter->stats_mtx);
3121
3122 al_eth_mac_start(&adapter->hal_adapter);
3123 }
3124
3125 static int
3126 al_media_update(if_t ifp)
3127 {
3128 struct al_eth_adapter *adapter = if_getsoftc(ifp);
3129
3130 if ((if_getflags(ifp) & IFF_UP) != 0)
3131 mii_mediachg(adapter->mii);
3132
3133 return (0);
3134 }
3135
3136 static void
3137 al_media_status(if_t ifp, struct ifmediareq *ifmr)
3138 {
3139 struct al_eth_adapter *sc = if_getsoftc(ifp);
3140 struct mii_data *mii;
3141
3142 if (sc->mii == NULL) {
3143 ifmr->ifm_active = IFM_ETHER | IFM_NONE;
3144 ifmr->ifm_status = 0;
3145
3146 return;
3147 }
3148
3149 mii = sc->mii;
3150 mii_pollstat(mii);
3151
3152 ifmr->ifm_active = mii->mii_media_active;
3153 ifmr->ifm_status = mii->mii_media_status;
3154 }
3155
3156 static void
3157 al_tick(void *arg)
3158 {
3159 struct al_eth_adapter *adapter = arg;
3160
3161 mii_tick(adapter->mii);
3162
3163 /* Schedule another timeout one second from now */
3164 callout_schedule(&adapter->wd_callout, hz);
3165 }
3166
3167 static void
3168 al_tick_stats(void *arg)
3169 {
3170 struct al_eth_adapter *adapter = arg;
3171
3172 al_eth_update_stats(adapter);
3173
3174 callout_schedule(&adapter->stats_callout, hz);
3175 }
3176
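/*
 * Bring the interface up: reset the function if a reset was requested,
 * refresh hardware-assist capabilities, initialize the serdes and MAC,
 * choose the interrupt mode, allocate Tx/Rx rings and IRQs and finally
 * start the MAC. Errors unwind in reverse order.
 */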
3177 static int
3178 al_eth_up(struct al_eth_adapter *adapter)
3179 {
3180 if_t ifp = adapter->netdev;
3181 int rc;
3182
3183 if (adapter->up)
3184 return (0);
3185
3186 if ((adapter->flags & AL_ETH_FLAG_RESET_REQUESTED) != 0) {
3187 al_eth_function_reset(adapter);
3188 adapter->flags &= ~AL_ETH_FLAG_RESET_REQUESTED;
3189 }
3190
3191 if_sethwassist(ifp, 0);
3192 if ((if_getcapenable(ifp) & IFCAP_TSO) != 0)
3193 if_sethwassistbits(ifp, CSUM_TSO, 0);
3194 if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
3195 if_sethwassistbits(ifp, (CSUM_TCP | CSUM_UDP), 0);
3196 if ((if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) != 0)
3197 if_sethwassistbits(ifp, (CSUM_TCP_IPV6 | CSUM_UDP_IPV6), 0);
3198
3199 al_eth_serdes_init(adapter);
3200
3201 rc = al_eth_hw_init(adapter);
3202 if (rc != 0)
3203 goto err_hw_init_open;
3204
3205 rc = al_eth_setup_int_mode(adapter);
3206 if (rc != 0) {
3207 device_printf(adapter->dev,
3208 "%s failed at setup interrupt mode!\n", __func__);
3209 goto err_setup_int;
3210 }
3211
3212 /* allocate transmit descriptors */
3213 rc = al_eth_setup_all_tx_resources(adapter);
3214 if (rc != 0)
3215 goto err_setup_tx;
3216
3217 /* allocate receive descriptors */
3218 rc = al_eth_setup_all_rx_resources(adapter);
3219 if (rc != 0)
3220 goto err_setup_rx;
3221
3222 rc = al_eth_request_irq(adapter);
3223 if (rc != 0)
3224 goto err_req_irq;
3225
3226 al_eth_up_complete(adapter);
3227
3228 adapter->up = true;
3229
3230 if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial)
3231 if_link_state_change(adapter->netdev, LINK_STATE_UP);
3232
3233 if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
3234 mii_mediachg(adapter->mii);
3235
3236 /* Schedule watchdog timeout */
3237 mtx_lock(&adapter->wd_mtx);
3238 callout_reset(&adapter->wd_callout, hz, al_tick, adapter);
3239 mtx_unlock(&adapter->wd_mtx);
3240
3241 mii_pollstat(adapter->mii);
3242 }
3243
3244 return (rc);
3245
3246 err_req_irq:
3247 al_eth_free_all_rx_resources(adapter);
3248 err_setup_rx:
3249 al_eth_free_all_tx_resources(adapter);
3250 err_setup_tx:
3251 al_eth_free_irq(adapter);
3252 err_setup_int:
3253 al_eth_hw_stop(adapter);
3254 err_hw_init_open:
3255 al_eth_function_reset(adapter);
3256
3257 return (rc);
3258 }
3259
3260 static int
3261 al_shutdown(device_t dev)
3262 {
3263 struct al_eth_adapter *adapter = device_get_softc(dev);
3264
3265 al_eth_down(adapter);
3266
3267 return (0);
3268 }
3269
3270 static void
3271 al_eth_down(struct al_eth_adapter *adapter)
3272 {
3273
3274 device_printf_dbg(adapter->dev, "al_eth_down: begin\n");
3275
3276 adapter->up = false;
3277
3278 mtx_lock(&adapter->wd_mtx);
3279 callout_stop(&adapter->wd_callout);
3280 mtx_unlock(&adapter->wd_mtx);
3281
3282 al_eth_disable_int_sync(adapter);
3283
3284 mtx_lock(&adapter->stats_mtx);
3285 callout_stop(&adapter->stats_callout);
3286 mtx_unlock(&adapter->stats_mtx);
3287
3288 al_eth_free_irq(adapter);
3289 al_eth_hw_stop(adapter);
3290
3291 al_eth_free_all_tx_resources(adapter);
3292 al_eth_free_all_rx_resources(adapter);
3293 }
3294
3295 static int
3296 al_ioctl(if_t ifp, u_long command, caddr_t data)
3297 {
3298 struct al_eth_adapter *adapter = if_getsoftc(ifp);
3299 struct ifreq *ifr = (struct ifreq *)data;
3300 int error = 0;
3301
3302 switch (command) {
3303 case SIOCSIFMTU:
3304 {
3305 error = al_eth_check_mtu(adapter, ifr->ifr_mtu);
3306 if (error != 0) {
3307 device_printf(adapter->dev, "ioctl wrong mtu %u\n",
3308 if_getmtu(adapter->netdev));
3309 break;
3310 }
3311
3312 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
3313 if_setmtu(adapter->netdev, ifr->ifr_mtu);
3314 al_init(adapter);
3315 break;
3316 }
3317 case SIOCSIFFLAGS:
3318 if ((if_getflags(ifp) & IFF_UP) != 0) {
3319 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
3320 if (((if_getflags(ifp) ^ adapter->if_flags) &
3321 (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3322 device_printf_dbg(adapter->dev,
3323 "ioctl promisc/allmulti\n");
3324 al_eth_set_rx_mode(adapter);
3325 }
3326 } else {
3327 error = al_eth_up(adapter);
3328 if (error == 0)
3329 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
3330 }
3331 } else {
3332 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
3333 al_eth_down(adapter);
3334 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
3335 }
3336 }
3337
3338 adapter->if_flags = if_getflags(ifp);
3339 break;
3340
3341 case SIOCADDMULTI:
3342 case SIOCDELMULTI:
3343 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
3344 device_printf_dbg(adapter->dev,
3345 "ioctl add/del multi before\n");
3346 al_eth_set_rx_mode(adapter);
3347 #ifdef DEVICE_POLLING
3348 if ((if_getcapenable(ifp) & IFCAP_POLLING) == 0)
3349 #endif
3350 }
3351 break;
3352 case SIOCSIFMEDIA:
3353 case SIOCGIFMEDIA:
3354 if (adapter->mii != NULL)
3355 error = ifmedia_ioctl(ifp, ifr,
3356 &adapter->mii->mii_media, command);
3357 else
3358 error = ifmedia_ioctl(ifp, ifr,
3359 &adapter->media, command);
3360 break;
3361 case SIOCSIFCAP:
3362 {
3363 int mask, reinit;
3364
3365 reinit = 0;
3366 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
3367 #ifdef DEVICE_POLLING
3368 if ((mask & IFCAP_POLLING) != 0) {
3369 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
3370 if (error != 0)
3371 return (error);
3372 if_setcapenablebit(ifp, IFCAP_POLLING, 0);
3373 } else {
3374 error = ether_poll_deregister(ifp);
3375 /* Enable interrupt even in error case */
3376 if_setcapenablebit(ifp, 0, IFCAP_POLLING);
3377 }
3378 }
3379 #endif
3380 if ((mask & IFCAP_HWCSUM) != 0) {
3381 /* apply to both rx and tx */
3382 if_togglecapenable(ifp, IFCAP_HWCSUM);
3383 reinit = 1;
3384 }
3385 if ((mask & IFCAP_HWCSUM_IPV6) != 0) {
3386 if_togglecapenable(ifp, IFCAP_HWCSUM_IPV6);
3387 reinit = 1;
3388 }
3389 if ((mask & IFCAP_TSO) != 0) {
3390 if_togglecapenable(ifp, IFCAP_TSO);
3391 reinit = 1;
3392 }
3393 if ((mask & IFCAP_LRO) != 0) {
3394 if_togglecapenable(ifp, IFCAP_LRO);
3395 }
3396 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
3397 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
3398 reinit = 1;
3399 }
3400 if ((mask & IFCAP_VLAN_HWFILTER) != 0) {
3401 if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER);
3402 reinit = 1;
3403 }
3404 if ((mask & IFCAP_VLAN_HWTSO) != 0) {
3405 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
3406 reinit = 1;
3407 }
3408 if ((reinit != 0) &&
3409 ((if_getdrvflags(ifp) & IFF_DRV_RUNNING)) != 0)
3410 {
3411 al_init(adapter);
3412 }
3413 break;
3414 }
3415
3416 default:
3417 error = ether_ioctl(ifp, command, data);
3418 break;
3419 }
3420
3421 return (error);
3422 }
3423
3424 static int
3425 al_is_device_supported(device_t dev)
3426 {
3427 uint16_t pci_vendor_id = pci_get_vendor(dev);
3428 uint16_t pci_device_id = pci_get_device(dev);
3429
3430 return (pci_vendor_id == PCI_VENDOR_ID_ANNAPURNA_LABS &&
3431 (pci_device_id == PCI_DEVICE_ID_AL_ETH ||
3432 pci_device_id == PCI_DEVICE_ID_AL_ETH_ADVANCED ||
3433 pci_device_id == PCI_DEVICE_ID_AL_ETH_NIC ||
3434 pci_device_id == PCI_DEVICE_ID_AL_ETH_FPGA_NIC));
3435 }
3436
3437 /* Time in mSec to keep trying to read / write from MDIO in case of error */
3438 #define MDIO_TIMEOUT_MSEC 100
3439 #define MDIO_PAUSE_MSEC 10
3440
3441 static int
3442 al_miibus_readreg(device_t dev, int phy, int reg)
3443 {
3444 struct al_eth_adapter *adapter = device_get_softc(dev);
3445 uint16_t value = 0;
3446 int rc;
3447 int timeout = MDIO_TIMEOUT_MSEC;
3448
3449 while (timeout > 0) {
3450 rc = al_eth_mdio_read(&adapter->hal_adapter, adapter->phy_addr,
3451 -1, reg, &value);
3452
3453 if (rc == 0)
3454 return (value);
3455
3456 device_printf_dbg(adapter->dev,
3457 "mdio read failed. try again in 10 msec\n");
3458
3459 timeout -= MDIO_PAUSE_MSEC;
3460 pause("readred pause", MDIO_PAUSE_MSEC);
3461 }
3462
3463 if (rc != 0)
3464 device_printf(adapter->dev, "MDIO read failed on timeout\n");
3465
3466 return (value);
3467 }
3468
3469 static int
3470 al_miibus_writereg(device_t dev, int phy, int reg, int value)
3471 {
3472 struct al_eth_adapter *adapter = device_get_softc(dev);
3473 int rc;
3474 int timeout = MDIO_TIMEOUT_MSEC;
3475
3476 while (timeout > 0) {
3477 rc = al_eth_mdio_write(&adapter->hal_adapter, adapter->phy_addr,
3478 -1, reg, value);
3479
3480 if (rc == 0)
3481 return (0);
3482
3483 device_printf(adapter->dev,
3484 "mdio write failed. try again in 10 msec\n");
3485
3486 timeout -= MDIO_PAUSE_MSEC;
3487 pause("miibus writereg", MDIO_PAUSE_MSEC);
3488 }
3489
3490 if (rc != 0)
3491 device_printf(adapter->dev, "MDIO write failed on timeout\n");
3492
3493 return (rc);
3494 }
3495
3496 static void
3497 al_miibus_statchg(device_t dev)
3498 {
3499 struct al_eth_adapter *adapter = device_get_softc(dev);
3500
3501 device_printf_dbg(adapter->dev,
3502 "al_miibus_statchg: state has changed!\n");
3503 device_printf_dbg(adapter->dev,
3504 "al_miibus_statchg: active = 0x%x status = 0x%x\n",
3505 adapter->mii->mii_media_active, adapter->mii->mii_media_status);
3506
3507 if (adapter->up == 0)
3508 return;
3509
3510 if ((adapter->mii->mii_media_status & IFM_AVALID) != 0) {
3511 if (adapter->mii->mii_media_status & IFM_ACTIVE) {
3512 device_printf(adapter->dev, "link is UP\n");
3513 if_link_state_change(adapter->netdev, LINK_STATE_UP);
3514 } else {
3515 device_printf(adapter->dev, "link is DOWN\n");
3516 if_link_state_change(adapter->netdev, LINK_STATE_DOWN);
3517 }
3518 }
3519 }
3520
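/*
 * Called by miibus when the PHY link state changes (RGMII mode): once the
 * link is valid and active, reconfigure the MAC for the resolved
 * speed/duplex.
 */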
3521 static void
3522 al_miibus_linkchg(device_t dev)
3523 {
3524 struct al_eth_adapter *adapter = device_get_softc(dev);
3525 uint8_t duplex = 0;
3526 uint8_t speed = 0;
3527
3528 if (adapter->mii == NULL)
3529 return;
3530
3531 if ((if_getflags(adapter->netdev) & IFF_UP) == 0)
3532 return;
3533
3534 /* Ignore link changes when link is not ready */
3535 if ((adapter->mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) !=
3536 (IFM_AVALID | IFM_ACTIVE)) {
3537 return;
3538 }
3539
3540 if ((adapter->mii->mii_media_active & IFM_FDX) != 0)
3541 duplex = 1;
3542
3543 speed = IFM_SUBTYPE(adapter->mii->mii_media_active);
3544
3545 if (speed == IFM_10_T) {
3546 al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3547 AL_10BASE_T_SPEED, duplex);
3548 return;
3549 }
3550
3551 if (speed == IFM_100_TX) {
3552 al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3553 AL_100BASE_TX_SPEED, duplex);
3554 return;
3555 }
3556
3557 if (speed == IFM_1000_T) {
3558 al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3559 AL_1000BASE_T_SPEED, duplex);
3560 return;
3561 }
3562
3563 device_printf(adapter->dev, "ERROR: unknown MII media active 0x%08x\n",
3564 adapter->mii->mii_media_active);
3565 }
3566