xref: /freebsd/sys/dev/cxgb/cxgb_main.c (revision 796bcf18)
1 /**************************************************************************
2 SPDX-License-Identifier: BSD-2-Clause
3 
4 Copyright (c) 2007-2009, Chelsio Inc.
5 All rights reserved.
6 
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are met:
9 
10  1. Redistributions of source code must retain the above copyright notice,
11     this list of conditions and the following disclaimer.
12 
13  2. Neither the name of the Chelsio Corporation nor the names of its
14     contributors may be used to endorse or promote products derived from
15     this software without specific prior written permission.
16 
17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 POSSIBILITY OF SUCH DAMAGE.
28 
29 ***************************************************************************/
30 
31 #include <sys/cdefs.h>
32 #include "opt_inet.h"
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/bus.h>
38 #include <sys/module.h>
39 #include <sys/pciio.h>
40 #include <sys/conf.h>
41 #include <machine/bus.h>
42 #include <machine/resource.h>
43 #include <sys/ktr.h>
44 #include <sys/rman.h>
45 #include <sys/ioccom.h>
46 #include <sys/mbuf.h>
47 #include <sys/linker.h>
48 #include <sys/firmware.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/smp.h>
52 #include <sys/sysctl.h>
53 #include <sys/syslog.h>
54 #include <sys/queue.h>
55 #include <sys/taskqueue.h>
56 #include <sys/proc.h>
57 
58 #include <net/bpf.h>
59 #include <net/debugnet.h>
60 #include <net/ethernet.h>
61 #include <net/if.h>
62 #include <net/if_var.h>
63 #include <net/if_arp.h>
64 #include <net/if_dl.h>
65 #include <net/if_media.h>
66 #include <net/if_types.h>
67 #include <net/if_vlan_var.h>
68 
69 #include <netinet/in_systm.h>
70 #include <netinet/in.h>
71 #include <netinet/if_ether.h>
72 #include <netinet/ip.h>
73 #include <netinet/ip.h>
74 #include <netinet/tcp.h>
75 #include <netinet/udp.h>
76 
77 #include <dev/pci/pcireg.h>
78 #include <dev/pci/pcivar.h>
79 #include <dev/pci/pci_private.h>
80 
81 #include <cxgb_include.h>
82 
83 #ifdef PRIV_SUPPORTED
84 #include <sys/priv.h>
85 #endif
86 
87 static int cxgb_setup_interrupts(adapter_t *);
88 static void cxgb_teardown_interrupts(adapter_t *);
89 static void cxgb_init(void *);
90 static int cxgb_init_locked(struct port_info *);
91 static int cxgb_uninit_locked(struct port_info *);
92 static int cxgb_uninit_synchronized(struct port_info *);
93 static int cxgb_ioctl(if_t, unsigned long, caddr_t);
94 static int cxgb_media_change(if_t);
95 static int cxgb_ifm_type(int);
96 static void cxgb_build_medialist(struct port_info *);
97 static void cxgb_media_status(if_t, struct ifmediareq *);
98 static uint64_t cxgb_get_counter(if_t, ift_counter);
99 static int setup_sge_qsets(adapter_t *);
100 static void cxgb_async_intr(void *);
101 static void cxgb_tick_handler(void *, int);
102 static void cxgb_tick(void *);
103 static void link_check_callout(void *);
104 static void check_link_status(void *, int);
105 static void setup_rss(adapter_t *sc);
106 static int alloc_filters(struct adapter *);
107 static int setup_hw_filters(struct adapter *);
108 static int set_filter(struct adapter *, int, const struct filter_info *);
109 static inline void mk_set_tcb_field(struct cpl_set_tcb_field *, unsigned int,
110     unsigned int, u64, u64);
111 static inline void set_tcb_field_ulp(struct cpl_set_tcb_field *, unsigned int,
112     unsigned int, u64, u64);
113 #ifdef TCP_OFFLOAD
114 static int cpl_not_handled(struct sge_qset *, struct rsp_desc *, struct mbuf *);
115 #endif
116 
117 /* Attachment glue for the PCI controller end of the device.  Each port of
118  * the device is attached separately, as defined later.
119  */
120 static int cxgb_controller_probe(device_t);
121 static int cxgb_controller_attach(device_t);
122 static int cxgb_controller_detach(device_t);
123 static void cxgb_free(struct adapter *);
124 static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
125     unsigned int end);
126 static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf);
127 static int cxgb_get_regs_len(void);
128 static void touch_bars(device_t dev);
129 static void cxgb_update_mac_settings(struct port_info *p);
130 #ifdef TCP_OFFLOAD
131 static int toe_capability(struct port_info *, int);
132 #endif
133 
134 /* Table for probing the cards.  The desc field isn't actually used */
135 struct cxgb_ident {
136 	uint16_t	vendor;
137 	uint16_t	device;
138 	int		index;
139 	char		*desc;
140 } cxgb_identifiers[] = {
141 	{PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
142 	{PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
143 	{PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
144 	{PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
145 	{PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
146 	{PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
147 	{PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
148 	{PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
149 	{PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
150 	{PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
151 	{PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
152 	{PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"},
153 	{PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
154 	{PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
155 	{0, 0, 0, NULL}
156 };
157 
158 static device_method_t cxgb_controller_methods[] = {
159 	DEVMETHOD(device_probe,		cxgb_controller_probe),
160 	DEVMETHOD(device_attach,	cxgb_controller_attach),
161 	DEVMETHOD(device_detach,	cxgb_controller_detach),
162 
163 	DEVMETHOD_END
164 };
165 
166 static driver_t cxgb_controller_driver = {
167 	"cxgbc",
168 	cxgb_controller_methods,
169 	sizeof(struct adapter)
170 };
171 
172 static int cxgbc_mod_event(module_t, int, void *);
173 
174 DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgbc_mod_event, NULL);
175 MODULE_PNP_INFO("U16:vendor;U16:device", pci, cxgbc, cxgb_identifiers,
176     nitems(cxgb_identifiers) - 1);
177 MODULE_VERSION(cxgbc, 1);
178 MODULE_DEPEND(cxgbc, firmware, 1, 1, 1);
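/*
 * The controller device attaches to the PCI bus as "cxgbc"; each port then
 * attaches to it as a "cxgb" child device (see below).  When built as a
 * module, the driver as a whole is normally loaded as if_cxgb, e.g. with
 * kldload(8) or an if_cxgb_load="YES" entry in loader.conf(5).
 */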
179 
180 /*
181  * Attachment glue for the ports.  Attachment is done directly to the
182  * controller device.
183  */
184 static int cxgb_port_probe(device_t);
185 static int cxgb_port_attach(device_t);
186 static int cxgb_port_detach(device_t);
187 
188 static device_method_t cxgb_port_methods[] = {
189 	DEVMETHOD(device_probe,		cxgb_port_probe),
190 	DEVMETHOD(device_attach,	cxgb_port_attach),
191 	DEVMETHOD(device_detach,	cxgb_port_detach),
192 	{ 0, 0 }
193 };
194 
195 static driver_t cxgb_port_driver = {
196 	"cxgb",
197 	cxgb_port_methods,
198 	0
199 };
200 
201 static d_ioctl_t cxgb_extension_ioctl;
202 static d_open_t cxgb_extension_open;
203 static d_close_t cxgb_extension_close;
204 
205 static struct cdevsw cxgb_cdevsw = {
206        .d_version =    D_VERSION,
207        .d_flags =      0,
208        .d_open =       cxgb_extension_open,
209        .d_close =      cxgb_extension_close,
210        .d_ioctl =      cxgb_extension_ioctl,
211        .d_name =       "cxgb",
212 };
213 
214 DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, 0, 0);
215 MODULE_VERSION(cxgb, 1);
216 
217 DEBUGNET_DEFINE(cxgb);
218 
219 static struct mtx t3_list_lock;
220 static SLIST_HEAD(, adapter) t3_list;
221 #ifdef TCP_OFFLOAD
222 static struct mtx t3_uld_list_lock;
223 static SLIST_HEAD(, uld_info) t3_uld_list;
224 #endif
225 
226 /*
227  * The driver uses the best interrupt scheme available on a platform in the
228  * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
229  * of these schemes the driver may consider as follows:
230  *
231  * msi = 2: choose from among all three options
232  * msi = 1: only consider MSI and pin interrupts
233  * msi = 0: force pin interrupts
234  */
235 static int msi_allowed = 2;
236 
237 SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
238     "CXGB driver parameters");
239 SYSCTL_INT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
240     "MSI-X, MSI, INTx selector");
241 
242 /*
243  * The driver uses an auto-queue algorithm by default.
244  * To disable it and force a single queue-set per port, use multiq = 0
245  */
246 static int multiq = 1;
247 SYSCTL_INT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
248     "use min(ncpus/ports, 8) queue-sets per port");
249 
250 /*
251  * By default the driver will not update the firmware unless
252  * it was compiled against a newer version
253  *
254  */
255 static int force_fw_update = 0;
256 SYSCTL_INT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
257     "update firmware even if up to date");
258 
259 int cxgb_use_16k_clusters = -1;
260 SYSCTL_INT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
261     &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue");
262 
263 static int nfilters = -1;
264 SYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN,
265     &nfilters, 0, "max number of entries in the filter table");
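/*
 * The knobs above (multiq, force_fw_update, use_16k_clusters, nfilters) are
 * likewise boot-time tunables under the hw.cxgb sysctl node, e.g.
 * (illustrative values only):
 *
 *	hw.cxgb.multiq="0"		# force a single queue-set per port
 *	hw.cxgb.use_16k_clusters="1"	# 16kB clusters for the jumbo queue
 *	hw.cxgb.nfilters="0"		# do not reserve filter-table entries
 */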
266 
267 enum {
268 	MAX_TXQ_ENTRIES      = 16384,
269 	MAX_CTRL_TXQ_ENTRIES = 1024,
270 	MAX_RSPQ_ENTRIES     = 16384,
271 	MAX_RX_BUFFERS       = 16384,
272 	MAX_RX_JUMBO_BUFFERS = 16384,
273 	MIN_TXQ_ENTRIES      = 4,
274 	MIN_CTRL_TXQ_ENTRIES = 4,
275 	MIN_RSPQ_ENTRIES     = 32,
276 	MIN_FL_ENTRIES       = 32,
277 	MIN_FL_JUMBO_ENTRIES = 32
278 };
279 
280 struct filter_info {
281 	u32 sip;
282 	u32 sip_mask;
283 	u32 dip;
284 	u16 sport;
285 	u16 dport;
286 	u32 vlan:12;
287 	u32 vlan_prio:3;
288 	u32 mac_hit:1;
289 	u32 mac_idx:4;
290 	u32 mac_vld:1;
291 	u32 pkt_type:2;
292 	u32 report_filter_id:1;
293 	u32 pass:1;
294 	u32 rss:1;
295 	u32 qset:3;
296 	u32 locked:1;
297 	u32 valid:1;
298 };
299 
300 enum { FILTER_NO_VLAN_PRI = 7 };
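/*
 * For illustration only: a hypothetical entry that steers TCP traffic to
 * local port 80 into queue set 2 might be filled in roughly as follows
 * before being programmed with set_filter().  The field encodings here
 * (notably pkt_type and the VLAN wildcards) are assumptions made for the
 * sake of the example, not authoritative hardware values.
 *
 *	struct filter_info f = { 0 };
 *
 *	f.dport = 80;				(destination TCP port)
 *	f.vlan = 0xfff;				(any VLAN)
 *	f.vlan_prio = FILTER_NO_VLAN_PRI;	(any VLAN priority)
 *	f.pkt_type = 1;				(assumed: TCP)
 *	f.pass = 1;				(pass rather than drop)
 *	f.qset = 2;				(steer to queue set 2)
 *	f.valid = 1;
 */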
301 
302 #define EEPROM_MAGIC 0x38E2F10C
303 
304 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
305 
306 
307 static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
308 
309 
310 static __inline char
311 t3rev2char(struct adapter *adapter)
312 {
313 	char rev = 'z';
314 
315 	switch(adapter->params.rev) {
316 	case T3_REV_A:
317 		rev = 'a';
318 		break;
319 	case T3_REV_B:
320 	case T3_REV_B2:
321 		rev = 'b';
322 		break;
323 	case T3_REV_C:
324 		rev = 'c';
325 		break;
326 	}
327 	return rev;
328 }
329 
330 static struct cxgb_ident *
331 cxgb_get_ident(device_t dev)
332 {
333 	struct cxgb_ident *id;
334 
335 	for (id = cxgb_identifiers; id->desc != NULL; id++) {
336 		if ((id->vendor == pci_get_vendor(dev)) &&
337 		    (id->device == pci_get_device(dev))) {
338 			return (id);
339 		}
340 	}
341 	return (NULL);
342 }
343 
344 static const struct adapter_info *
345 cxgb_get_adapter_info(device_t dev)
346 {
347 	struct cxgb_ident *id;
348 	const struct adapter_info *ai;
349 
350 	id = cxgb_get_ident(dev);
351 	if (id == NULL)
352 		return (NULL);
353 
354 	ai = t3_get_adapter_info(id->index);
355 
356 	return (ai);
357 }
358 
359 static int
360 cxgb_controller_probe(device_t dev)
361 {
362 	const struct adapter_info *ai;
363 	const char *ports;
364 	int nports;
365 
366 	ai = cxgb_get_adapter_info(dev);
367 	if (ai == NULL)
368 		return (ENXIO);
369 
370 	nports = ai->nports0 + ai->nports1;
371 	if (nports == 1)
372 		ports = "port";
373 	else
374 		ports = "ports";
375 
376 	device_set_descf(dev, "%s, %d %s", ai->desc, nports, ports);
377 	return (BUS_PROBE_DEFAULT);
378 }
379 
380 #define FW_FNAME "cxgb_t3fw"
381 #define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom"
382 #define TPSRAM_NAME "cxgb_t3%c_protocol_sram"
383 
384 static int
385 upgrade_fw(adapter_t *sc)
386 {
387 	const struct firmware *fw;
388 	int status;
389 	u32 vers;
390 
391 	if ((fw = firmware_get(FW_FNAME)) == NULL)  {
392 		device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
393 		return (ENOENT);
394 	} else
395 		device_printf(sc->dev, "installing firmware on card\n");
396 	status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
397 
398 	if (status != 0) {
399 		device_printf(sc->dev, "failed to install firmware: %d\n",
400 		    status);
401 	} else {
402 		t3_get_fw_version(sc, &vers);
403 		snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
404 		    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
405 		    G_FW_VERSION_MICRO(vers));
406 	}
407 
408 	firmware_put(fw, FIRMWARE_UNLOAD);
409 
410 	return (status);
411 }
412 
413 /*
414  * The cxgb_controller_attach function is responsible for the initial
415  * bringup of the device.  Its responsibilities include:
416  *
417  *  1. Determine if the device supports MSI or MSI-X.
418  *  2. Allocate bus resources so that we can access the Base Address Register
419  *  3. Create and initialize mutexes for the controller and its control
420  *     logic such as SGE and MDIO.
421  *  4. Call hardware specific setup routine for the adapter as a whole.
422  *  5. Allocate the BAR for doing MSI-X.
423  *  6. Set up the line interrupt iff MSI-X is not supported.
424  *  7. Create the driver's taskq.
425  *  8. Start one task queue service thread.
426  *  9. Check if the firmware and SRAM are up-to-date.  They will be
427  *     auto-updated later (before FULL_INIT_DONE), if required.
428  * 10. Create a child device for each MAC (port)
429  * 11. Initialize T3 private state.
430  * 12. Trigger the LED
431  * 13. Set up offload iff supported.
432  * 14. Reset/restart the tick callout.
433  * 15. Attach sysctls
434  *
435  * NOTE: Any modification or deviation from this list MUST be reflected in
436  * the above comment.  Failure to do so will result in problems on various
437  * error conditions including link flapping.
438  */
439 static int
440 cxgb_controller_attach(device_t dev)
441 {
442 	device_t child;
443 	const struct adapter_info *ai;
444 	struct adapter *sc;
445 	int i, error = 0;
446 	uint32_t vers;
447 	int port_qsets = 1;
448 	int msi_needed, reg;
449 
450 	sc = device_get_softc(dev);
451 	sc->dev = dev;
452 	sc->msi_count = 0;
453 	ai = cxgb_get_adapter_info(dev);
454 
455 	snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
456 	    device_get_unit(dev));
457 	ADAPTER_LOCK_INIT(sc, sc->lockbuf);
458 
459 	snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
460 	    device_get_unit(dev));
461 	snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
462 	    device_get_unit(dev));
463 	snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
464 	    device_get_unit(dev));
465 
466 	MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
467 	MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
468 	MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
469 
470 	mtx_lock(&t3_list_lock);
471 	SLIST_INSERT_HEAD(&t3_list, sc, link);
472 	mtx_unlock(&t3_list_lock);
473 
474 	/* find the PCIe link width and set max read request to 4KB */
475 	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
476 		uint16_t lnk;
477 
478 		lnk = pci_read_config(dev, reg + PCIER_LINK_STA, 2);
479 		sc->link_width = (lnk & PCIEM_LINK_STA_WIDTH) >> 4;
480 		if (sc->link_width < 8 &&
481 		    (ai->caps & SUPPORTED_10000baseT_Full)) {
482 			device_printf(sc->dev,
483 			    "PCIe x%d Link, expect reduced performance\n",
484 			    sc->link_width);
485 		}
486 
487 		pci_set_max_read_req(dev, 4096);
488 	}
489 
490 	touch_bars(dev);
491 	pci_enable_busmaster(dev);
492 	/*
493 	 * Allocate the registers and make them available to the driver.
494 	 * The registers that we care about for NIC mode are in BAR 0
495 	 */
496 	sc->regs_rid = PCIR_BAR(0);
497 	if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
498 	    &sc->regs_rid, RF_ACTIVE)) == NULL) {
499 		device_printf(dev, "Cannot allocate BAR region 0\n");
500 		error = ENXIO;
501 		goto out;
502 	}
503 
504 	sc->bt = rman_get_bustag(sc->regs_res);
505 	sc->bh = rman_get_bushandle(sc->regs_res);
506 	sc->mmio_len = rman_get_size(sc->regs_res);
507 
508 	for (i = 0; i < MAX_NPORTS; i++)
509 		sc->port[i].adapter = sc;
510 
511 	if (t3_prep_adapter(sc, ai, 1) < 0) {
512 		printf("prep adapter failed\n");
513 		error = ENODEV;
514 		goto out;
515 	}
516 
517 	sc->udbs_rid = PCIR_BAR(2);
518 	sc->udbs_res = NULL;
519 	if (is_offload(sc) &&
520 	    ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
521 		   &sc->udbs_rid, RF_ACTIVE)) == NULL)) {
522 		device_printf(dev, "Cannot allocate BAR region 1\n");
523 		error = ENXIO;
524 		goto out;
525 	}
526 
527         /* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
528 	 * enough messages for the queue sets.  If that fails, try falling
529 	 * back to MSI.  If that fails, then try falling back to the legacy
530 	 * interrupt pin model.
531 	 */
532 	sc->msix_regs_rid = 0x20;
533 	if ((msi_allowed >= 2) &&
534 	    (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
535 	    &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
536 
537 		if (multiq)
538 			port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus);
539 		msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1;
540 
541 		if (pci_msix_count(dev) == 0 ||
542 		    (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 ||
543 		    sc->msi_count != msi_needed) {
544 			device_printf(dev, "alloc msix failed - "
545 				      "msi_count=%d, msi_needed=%d, err=%d; "
546 				      "will try MSI\n", sc->msi_count,
547 				      msi_needed, error);
548 			sc->msi_count = 0;
549 			port_qsets = 1;
550 			pci_release_msi(dev);
551 			bus_release_resource(dev, SYS_RES_MEMORY,
552 			    sc->msix_regs_rid, sc->msix_regs_res);
553 			sc->msix_regs_res = NULL;
554 		} else {
555 			sc->flags |= USING_MSIX;
556 			sc->cxgb_intr = cxgb_async_intr;
557 			device_printf(dev,
558 				      "using MSI-X interrupts (%u vectors)\n",
559 				      sc->msi_count);
560 		}
561 	}
562 
563 	if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
564 		sc->msi_count = 1;
565 		if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) {
566 			device_printf(dev, "alloc msi failed - "
567 				      "err=%d; will try INTx\n", error);
568 			sc->msi_count = 0;
569 			port_qsets = 1;
570 			pci_release_msi(dev);
571 		} else {
572 			sc->flags |= USING_MSI;
573 			sc->cxgb_intr = t3_intr_msi;
574 			device_printf(dev, "using MSI interrupts\n");
575 		}
576 	}
577 	if (sc->msi_count == 0) {
578 		device_printf(dev, "using line interrupts\n");
579 		sc->cxgb_intr = t3b_intr;
580 	}
581 
582 	/* Create a private taskqueue thread for handling driver events */
583 	sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
584 	    taskqueue_thread_enqueue, &sc->tq);
585 	if (sc->tq == NULL) {
586 		device_printf(dev, "failed to allocate controller task queue\n");
587 		goto out;
588 	}
589 
590 	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
591 	    device_get_nameunit(dev));
592 	TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
593 
594 
595 	/* Create a periodic callout for checking adapter status */
596 	callout_init(&sc->cxgb_tick_ch, 1);
597 
598 	if (t3_check_fw_version(sc) < 0 || force_fw_update) {
599 		/*
600 		 * Warn user that a firmware update will be attempted in init.
601 		 */
602 		device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
603 		    FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
604 		sc->flags &= ~FW_UPTODATE;
605 	} else {
606 		sc->flags |= FW_UPTODATE;
607 	}
608 
609 	if (t3_check_tpsram_version(sc) < 0) {
610 		/*
611 		 * Warn user that a firmware update will be attempted in init.
612 		 */
613 		device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
614 		    t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
615 		sc->flags &= ~TPS_UPTODATE;
616 	} else {
617 		sc->flags |= TPS_UPTODATE;
618 	}
619 
620 	/*
621 	 * Create a child device for each MAC.  The ethernet attachment
622 	 * will be done in these children.
623 	 */
624 	for (i = 0; i < (sc)->params.nports; i++) {
625 		struct port_info *pi;
626 
627 		if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
628 			device_printf(dev, "failed to add child port\n");
629 			error = EINVAL;
630 			goto out;
631 		}
632 		pi = &sc->port[i];
633 		pi->adapter = sc;
634 		pi->nqsets = port_qsets;
635 		pi->first_qset = i*port_qsets;
636 		pi->port_id = i;
637 		pi->tx_chan = i >= ai->nports0;
638 		pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
639 		sc->rxpkt_map[pi->txpkt_intf] = i;
640 		sc->port[i].tx_chan = i >= ai->nports0;
641 		sc->portdev[i] = child;
642 		device_set_softc(child, pi);
643 	}
644 	if ((error = bus_generic_attach(dev)) != 0)
645 		goto out;
646 
647 	/* initialize sge private state */
648 	t3_sge_init_adapter(sc);
649 
650 	t3_led_ready(sc);
651 
652 	error = t3_get_fw_version(sc, &vers);
653 	if (error)
654 		goto out;
655 
656 	snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
657 	    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
658 	    G_FW_VERSION_MICRO(vers));
659 
660 	device_set_descf(dev, "%s %sNIC\t E/C: %s S/N: %s",
661 	    ai->desc, is_offload(sc) ? "R" : "",
662 	    sc->params.vpd.ec, sc->params.vpd.sn);
663 
664 	snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x",
665 		 sc->params.vpd.port_type[0], sc->params.vpd.port_type[1],
666 		 sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]);
667 
668 	device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
669 	callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
670 	t3_add_attach_sysctls(sc);
671 
672 #ifdef TCP_OFFLOAD
673 	for (i = 0; i < NUM_CPL_HANDLERS; i++)
674 		sc->cpl_handler[i] = cpl_not_handled;
675 #endif
676 
677 	t3_intr_clear(sc);
678 	error = cxgb_setup_interrupts(sc);
679 out:
680 	if (error)
681 		cxgb_free(sc);
682 
683 	return (error);
684 }
685 
686 /*
687  * The cxgb_controller_detach routine is called when the device is
688  * unloaded from the system.
689  */
690 
691 static int
692 cxgb_controller_detach(device_t dev)
693 {
694 	struct adapter *sc;
695 
696 	sc = device_get_softc(dev);
697 
698 	cxgb_free(sc);
699 
700 	return (0);
701 }
702 
703 /*
704  * cxgb_free() is called by the cxgb_controller_detach() routine to tear
705  * down the structures that were built up in cxgb_controller_attach() and
706  * should be the final piece of work done when fully unloading the driver.
707  * Its responsibilities include:
708  *
709  *
710  *  1. Shutting down the threads started by the cxgb_controller_attach()
711  *     routine.
712  *  2. Stopping the lower level device and all callouts (cxgb_down_locked()).
713  *  3. Detaching all of the port devices created during the
714  *     cxgb_controller_attach() routine.
715  *  4. Removing the device children created via cxgb_controller_attach().
716  *  5. Releasing PCI resources associated with the device.
717  *  6. Turning off the offload support, iff it was turned on.
718  *  7. Destroying the mutexes created in cxgb_controller_attach().
719  *
720  */
721 static void
722 cxgb_free(struct adapter *sc)
723 {
724 	int i, nqsets = 0;
725 
726 	ADAPTER_LOCK(sc);
727 	sc->flags |= CXGB_SHUTDOWN;
728 	ADAPTER_UNLOCK(sc);
729 
730 	/*
731 	 * Make sure all child devices are gone.
732 	 */
733 	bus_generic_detach(sc->dev);
734 	for (i = 0; i < (sc)->params.nports; i++) {
735 		if (sc->portdev[i] &&
736 		    device_delete_child(sc->dev, sc->portdev[i]) != 0)
737 			device_printf(sc->dev, "failed to delete child port\n");
738 		nqsets += sc->port[i].nqsets;
739 	}
740 
741 	/*
742 	 * At this point, it is as if cxgb_port_detach has run on all ports, and
743 	 * cxgb_down has run on the adapter.  All interrupts have been silenced,
744 	 * all open devices have been closed.
745 	 */
746 	KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)",
747 					   __func__, sc->open_device_map));
748 	for (i = 0; i < sc->params.nports; i++) {
749 		KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!",
750 						  __func__, i));
751 	}
752 
753 	/*
754 	 * Finish off the adapter's callouts.
755 	 */
756 	callout_drain(&sc->cxgb_tick_ch);
757 	callout_drain(&sc->sge_timer_ch);
758 
759 	/*
760 	 * Release resources grabbed under FULL_INIT_DONE by cxgb_up.  The
761 	 * sysctls are cleaned up by the kernel linker.
762 	 */
763 	if (sc->flags & FULL_INIT_DONE) {
764  		t3_free_sge_resources(sc, nqsets);
765  		sc->flags &= ~FULL_INIT_DONE;
766  	}
767 
768 	/*
769 	 * Release all interrupt resources.
770 	 */
771 	cxgb_teardown_interrupts(sc);
772 	if (sc->flags & (USING_MSI | USING_MSIX)) {
773 		device_printf(sc->dev, "releasing msi message(s)\n");
774 		pci_release_msi(sc->dev);
775 	} else {
776 		device_printf(sc->dev, "no msi message to release\n");
777 	}
778 
779 	if (sc->msix_regs_res != NULL) {
780 		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
781 		    sc->msix_regs_res);
782 	}
783 
784 	/*
785 	 * Free the adapter's taskqueue.
786 	 */
787 	if (sc->tq != NULL) {
788 		taskqueue_free(sc->tq);
789 		sc->tq = NULL;
790 	}
791 
792 	free(sc->filters, M_DEVBUF);
793 	t3_sge_free(sc);
794 
795 	if (sc->udbs_res != NULL)
796 		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
797 		    sc->udbs_res);
798 
799 	if (sc->regs_res != NULL)
800 		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
801 		    sc->regs_res);
802 
803 	MTX_DESTROY(&sc->mdio_lock);
804 	MTX_DESTROY(&sc->sge.reg_lock);
805 	MTX_DESTROY(&sc->elmer_lock);
806 	mtx_lock(&t3_list_lock);
807 	SLIST_REMOVE(&t3_list, sc, adapter, link);
808 	mtx_unlock(&t3_list_lock);
809 	ADAPTER_LOCK_DEINIT(sc);
810 }
811 
812 /**
813  *	setup_sge_qsets - configure SGE Tx/Rx/response queues
814  *	@sc: the controller softc
815  *
816  *	Determines how many sets of SGE queues to use and initializes them.
817  *	We support multiple queue sets per port if we have MSI-X, otherwise
818  *	just one queue set per port.
819  */
820 static int
821 setup_sge_qsets(adapter_t *sc)
822 {
823 	int i, j, err, irq_idx = 0, qset_idx = 0;
824 	u_int ntxq = SGE_TXQ_PER_SET;
825 
826 	if ((err = t3_sge_alloc(sc)) != 0) {
827 		device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
828 		return (err);
829 	}
830 
831 	if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
832 		irq_idx = -1;
833 
834 	for (i = 0; i < (sc)->params.nports; i++) {
835 		struct port_info *pi = &sc->port[i];
836 
837 		for (j = 0; j < pi->nqsets; j++, qset_idx++) {
838 			err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
839 			    (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
840 			    &sc->params.sge.qset[qset_idx], ntxq, pi);
841 			if (err) {
842 				t3_free_sge_resources(sc, qset_idx);
843 				device_printf(sc->dev,
844 				    "t3_sge_alloc_qset failed with %d\n", err);
845 				return (err);
846 			}
847 		}
848 	}
849 
850 	sc->nqsets = qset_idx;
851 
852 	return (0);
853 }
854 
855 static void
856 cxgb_teardown_interrupts(adapter_t *sc)
857 {
858 	int i;
859 
860 	for (i = 0; i < SGE_QSETS; i++) {
861 		if (sc->msix_intr_tag[i] == NULL) {
862 
863 			/* Should have been setup fully or not at all */
864 			KASSERT(sc->msix_irq_res[i] == NULL &&
865 				sc->msix_irq_rid[i] == 0,
866 				("%s: half-done interrupt (%d).", __func__, i));
867 
868 			continue;
869 		}
870 
871 		bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
872 				  sc->msix_intr_tag[i]);
873 		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i],
874 				     sc->msix_irq_res[i]);
875 
876 		sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL;
877 		sc->msix_irq_rid[i] = 0;
878 	}
879 
880 	if (sc->intr_tag) {
881 		KASSERT(sc->irq_res != NULL,
882 			("%s: half-done interrupt.", __func__));
883 
884 		bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
885 		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
886 				     sc->irq_res);
887 
888 		sc->irq_res = sc->intr_tag = NULL;
889 		sc->irq_rid = 0;
890 	}
891 }
892 
893 static int
894 cxgb_setup_interrupts(adapter_t *sc)
895 {
896 	struct resource *res;
897 	void *tag;
898 	int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX);
899 
900 	sc->irq_rid = intr_flag ? 1 : 0;
901 	sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid,
902 					     RF_SHAREABLE | RF_ACTIVE);
903 	if (sc->irq_res == NULL) {
904 		device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n",
905 			      intr_flag, sc->irq_rid);
906 		err = EINVAL;
907 		sc->irq_rid = 0;
908 	} else {
909 		err = bus_setup_intr(sc->dev, sc->irq_res,
910 		    INTR_MPSAFE | INTR_TYPE_NET, NULL,
911 		    sc->cxgb_intr, sc, &sc->intr_tag);
912 
913 		if (err) {
914 			device_printf(sc->dev,
915 				      "Cannot set up interrupt (%x, %u, %d)\n",
916 				      intr_flag, sc->irq_rid, err);
917 			bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
918 					     sc->irq_res);
919 			sc->irq_res = sc->intr_tag = NULL;
920 			sc->irq_rid = 0;
921 		}
922 	}
923 
924 	/* That's all for INTx or MSI */
925 	if (!(intr_flag & USING_MSIX) || err)
926 		return (err);
927 
928 	bus_describe_intr(sc->dev, sc->irq_res, sc->intr_tag, "err");
929 	for (i = 0; i < sc->msi_count - 1; i++) {
930 		rid = i + 2;
931 		res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
932 					     RF_SHAREABLE | RF_ACTIVE);
933 		if (res == NULL) {
934 			device_printf(sc->dev, "Cannot allocate interrupt "
935 				      "for message %d\n", rid);
936 			err = EINVAL;
937 			break;
938 		}
939 
940 		err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET,
941 				     NULL, t3_intr_msix, &sc->sge.qs[i], &tag);
942 		if (err) {
943 			device_printf(sc->dev, "Cannot set up interrupt "
944 				      "for message %d (%d)\n", rid, err);
945 			bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res);
946 			break;
947 		}
948 
949 		sc->msix_irq_rid[i] = rid;
950 		sc->msix_irq_res[i] = res;
951 		sc->msix_intr_tag[i] = tag;
952 		bus_describe_intr(sc->dev, res, tag, "qs%d", i);
953 	}
954 
955 	if (err)
956 		cxgb_teardown_interrupts(sc);
957 
958 	return (err);
959 }
960 
961 
962 static int
963 cxgb_port_probe(device_t dev)
964 {
965 	struct port_info *p;
966 	const char *desc;
967 
968 	p = device_get_softc(dev);
969 	desc = p->phy.desc;
970 	device_set_descf(dev, "Port %d %s", p->port_id, desc);
971 	return (0);
972 }
973 
974 
975 static int
976 cxgb_makedev(struct port_info *pi)
977 {
978 
979 	pi->port_cdev = make_dev(&cxgb_cdevsw, if_getdunit(pi->ifp),
980 	    UID_ROOT, GID_WHEEL, 0600, "%s", if_name(pi->ifp));
981 
982 	if (pi->port_cdev == NULL)
983 		return (ENOMEM);
984 
985 	pi->port_cdev->si_drv1 = (void *)pi;
986 
987 	return (0);
988 }
989 
990 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
991     IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
992     IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
993 #define CXGB_CAP_ENABLE CXGB_CAP
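/*
 * Everything in CXGB_CAP is enabled by default (TSO excepted on 4-port
 * cards, see below) and can typically be toggled per interface from
 * userland with ifconfig(8), e.g. "ifconfig cxgb0 -tso" or
 * "ifconfig cxgb0 lro".
 */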
994 
995 static int
996 cxgb_port_attach(device_t dev)
997 {
998 	struct port_info *p;
999 	if_t ifp;
1000 	int err;
1001 	struct adapter *sc;
1002 
1003 	p = device_get_softc(dev);
1004 	sc = p->adapter;
1005 	snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
1006 	    device_get_unit(device_get_parent(dev)), p->port_id);
1007 	PORT_LOCK_INIT(p, p->lockbuf);
1008 
1009 	callout_init(&p->link_check_ch, 1);
1010 	TASK_INIT(&p->link_check_task, 0, check_link_status, p);
1011 
1012 	/* Allocate an ifnet object and set it up */
1013 	ifp = p->ifp = if_alloc(IFT_ETHER);
1014 	if (ifp == NULL) {
1015 		device_printf(dev, "Cannot allocate ifnet\n");
1016 		return (ENOMEM);
1017 	}
1018 
1019 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1020 	if_setinitfn(ifp, cxgb_init);
1021 	if_setsoftc(ifp, p);
1022 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
1023 	if_setioctlfn(ifp, cxgb_ioctl);
1024 	if_settransmitfn(ifp, cxgb_transmit);
1025 	if_setqflushfn(ifp, cxgb_qflush);
1026 	if_setgetcounterfn(ifp, cxgb_get_counter);
1027 
1028 	if_setcapabilities(ifp, CXGB_CAP);
1029 #ifdef TCP_OFFLOAD
1030 	if (is_offload(sc))
1031 		if_setcapabilitiesbit(ifp, IFCAP_TOE4, 0);
1032 #endif
1033 	if_setcapenable(ifp, CXGB_CAP_ENABLE);
1034 	if_sethwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
1035 	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1036 	if_sethwtsomax(ifp, IP_MAXPACKET);
1037 	if_sethwtsomaxsegcount(ifp, 36);
1038 	if_sethwtsomaxsegsize(ifp, 65536);
1039 
1040 	/*
1041 	 * Disable TSO on 4-port - it isn't supported by the firmware.
1042 	 */
1043 	if (sc->params.nports > 2) {
1044 		if_setcapabilitiesbit(ifp, 0, IFCAP_TSO | IFCAP_VLAN_HWTSO);
1045 		if_setcapenablebit(ifp, 0, IFCAP_TSO | IFCAP_VLAN_HWTSO);
1046 		if_sethwassistbits(ifp, 0, CSUM_TSO);
1047 	}
1048 
1049 	ether_ifattach(ifp, p->hw_addr);
1050 
1051 	/* Attach driver debugnet methods. */
1052 	DEBUGNET_SET(ifp, cxgb);
1053 
1054 #ifdef DEFAULT_JUMBO
1055 	if (sc->params.nports <= 2)
1056 		if_setmtu(ifp, ETHERMTU_JUMBO);
1057 #endif
1058 	if ((err = cxgb_makedev(p)) != 0) {
1059 		printf("makedev failed %d\n", err);
1060 		return (err);
1061 	}
1062 
1063 	/* Create a list of media supported by this port */
1064 	ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
1065 	    cxgb_media_status);
1066 	cxgb_build_medialist(p);
1067 
1068 	t3_sge_init_port(p);
1069 
1070 	return (err);
1071 }
1072 
1073 /*
1074  * cxgb_port_detach() is called via the device_detach methods when
1075  * cxgb_free() calls the bus_generic_detach.  It is responsible for
1076  * removing the device from the view of the kernel, i.e. from all
1077  * interface lists, etc.  This routine is only called when the driver is
1078  * being unloaded, not when the link goes down.
1079  */
1080 static int
1081 cxgb_port_detach(device_t dev)
1082 {
1083 	struct port_info *p;
1084 	struct adapter *sc;
1085 	int i;
1086 
1087 	p = device_get_softc(dev);
1088 	sc = p->adapter;
1089 
1090 	/* Tell cxgb_ioctl and if_init that the port is going away */
1091 	ADAPTER_LOCK(sc);
1092 	SET_DOOMED(p);
1093 	wakeup(&sc->flags);
1094 	while (IS_BUSY(sc))
1095 		mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0);
1096 	SET_BUSY(sc);
1097 	ADAPTER_UNLOCK(sc);
1098 
1099 	if (p->port_cdev != NULL)
1100 		destroy_dev(p->port_cdev);
1101 
1102 	cxgb_uninit_synchronized(p);
1103 	ether_ifdetach(p->ifp);
1104 
1105 	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1106 		struct sge_qset *qs = &sc->sge.qs[i];
1107 		struct sge_txq *txq = &qs->txq[TXQ_ETH];
1108 
1109 		callout_drain(&txq->txq_watchdog);
1110 		callout_drain(&txq->txq_timer);
1111 	}
1112 
1113 	PORT_LOCK_DEINIT(p);
1114 	if_free(p->ifp);
1115 	p->ifp = NULL;
1116 
1117 	ADAPTER_LOCK(sc);
1118 	CLR_BUSY(sc);
1119 	wakeup_one(&sc->flags);
1120 	ADAPTER_UNLOCK(sc);
1121 	return (0);
1122 }
1123 
1124 void
1125 t3_fatal_err(struct adapter *sc)
1126 {
1127 	u_int fw_status[4];
1128 
1129 	if (sc->flags & FULL_INIT_DONE) {
1130 		t3_sge_stop(sc);
1131 		t3_write_reg(sc, A_XGM_TX_CTRL, 0);
1132 		t3_write_reg(sc, A_XGM_RX_CTRL, 0);
1133 		t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
1134 		t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
1135 		t3_intr_disable(sc);
1136 	}
1137 	device_printf(sc->dev,"encountered fatal error, operation suspended\n");
1138 	if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
1139 		device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
1140 		    fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
1141 }
1142 
1143 int
1144 t3_os_find_pci_capability(adapter_t *sc, int cap)
1145 {
1146 	device_t dev;
1147 	struct pci_devinfo *dinfo;
1148 	pcicfgregs *cfg;
1149 	uint32_t status;
1150 	uint8_t ptr;
1151 
1152 	dev = sc->dev;
1153 	dinfo = device_get_ivars(dev);
1154 	cfg = &dinfo->cfg;
1155 
1156 	status = pci_read_config(dev, PCIR_STATUS, 2);
1157 	if (!(status & PCIM_STATUS_CAPPRESENT))
1158 		return (0);
1159 
1160 	switch (cfg->hdrtype & PCIM_HDRTYPE) {
1161 	case 0:
1162 	case 1:
1163 		ptr = PCIR_CAP_PTR;
1164 		break;
1165 	case 2:
1166 		ptr = PCIR_CAP_PTR_2;
1167 		break;
1168 	default:
1169 		return (0);
1170 		break;
1171 	}
1172 	ptr = pci_read_config(dev, ptr, 1);
1173 
1174 	while (ptr != 0) {
1175 		if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
1176 			return (ptr);
1177 		ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1178 	}
1179 
1180 	return (0);
1181 }
1182 
1183 int
1184 t3_os_pci_save_state(struct adapter *sc)
1185 {
1186 	device_t dev;
1187 	struct pci_devinfo *dinfo;
1188 
1189 	dev = sc->dev;
1190 	dinfo = device_get_ivars(dev);
1191 
1192 	pci_cfg_save(dev, dinfo, 0);
1193 	return (0);
1194 }
1195 
1196 int
1197 t3_os_pci_restore_state(struct adapter *sc)
1198 {
1199 	device_t dev;
1200 	struct pci_devinfo *dinfo;
1201 
1202 	dev = sc->dev;
1203 	dinfo = device_get_ivars(dev);
1204 
1205 	pci_cfg_restore(dev, dinfo);
1206 	return (0);
1207 }
1208 
1209 /**
1210  *	t3_os_link_changed - handle link status changes
1211  *	@sc: the adapter associated with the link change
1212  *	@port_id: the port index whose link status has changed
1213  *	@link_status: the new status of the link
1214  *	@speed: the new speed setting
1215  *	@duplex: the new duplex setting
1216  *	@fc: the new flow-control setting
1217  *
1218  *	This is the OS-dependent handler for link status changes.  The OS
1219  *	neutral handler takes care of most of the processing for these events,
1220  *	then calls this handler for any OS-specific processing.
1221  */
1222 void
1223 t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
1224      int duplex, int fc, int mac_was_reset)
1225 {
1226 	struct port_info *pi = &adapter->port[port_id];
1227 	if_t ifp = pi->ifp;
1228 
1229 	/* no race with detach, so ifp should always be good */
1230 	KASSERT(ifp, ("%s: if detached.", __func__));
1231 
1232 	/* Reapply mac settings if they were lost due to a reset */
1233 	if (mac_was_reset) {
1234 		PORT_LOCK(pi);
1235 		cxgb_update_mac_settings(pi);
1236 		PORT_UNLOCK(pi);
1237 	}
1238 
1239 	if (link_status) {
1240 		if_setbaudrate(ifp, IF_Mbps(speed));
1241 		if_link_state_change(ifp, LINK_STATE_UP);
1242 	} else
1243 		if_link_state_change(ifp, LINK_STATE_DOWN);
1244 }
1245 
1246 /**
1247  *	t3_os_phymod_changed - handle PHY module changes
1248  *	@phy: the PHY reporting the module change
1249  *	@mod_type: new module type
1250  *
1251  *	This is the OS-dependent handler for PHY module changes.  It is
1252  *	invoked when a PHY module is removed or inserted for any OS-specific
1253  *	processing.
1254  */
1255 void t3_os_phymod_changed(struct adapter *adap, int port_id)
1256 {
1257 	static const char *mod_str[] = {
1258 		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown"
1259 	};
1260 	struct port_info *pi = &adap->port[port_id];
1261 	int mod = pi->phy.modtype;
1262 
1263 	if (mod != pi->media.ifm_cur->ifm_data)
1264 		cxgb_build_medialist(pi);
1265 
1266 	if (mod == phy_modtype_none)
1267 		if_printf(pi->ifp, "PHY module unplugged\n");
1268 	else {
1269 		KASSERT(mod < ARRAY_SIZE(mod_str),
1270 			("invalid PHY module type %d", mod));
1271 		if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]);
1272 	}
1273 }
1274 
1275 void
1276 t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
1277 {
1278 
1279 	/*
1280 	 * The ifnet might not be allocated before this gets called,
1281 	 * as this is called early on in attach by t3_prep_adapter, so
1282 	 * save the address off in the port structure.
1283 	 */
1284 	if (cxgb_debug)
1285 		printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
1286 	bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
1287 }
1288 
1289 /*
1290  * Programs the XGMAC based on the settings in the ifnet.  These settings
1291  * include MTU, MAC address, mcast addresses, etc.
1292  */
1293 static void
1294 cxgb_update_mac_settings(struct port_info *p)
1295 {
1296 	if_t ifp = p->ifp;
1297 	struct t3_rx_mode rm;
1298 	struct cmac *mac = &p->mac;
1299 	int mtu, hwtagging;
1300 
1301 	PORT_LOCK_ASSERT_OWNED(p);
1302 
1303 	bcopy(if_getlladdr(ifp), p->hw_addr, ETHER_ADDR_LEN);
1304 
1305 	mtu = if_getmtu(ifp);
1306 	if (if_getcapenable(ifp) & IFCAP_VLAN_MTU)
1307 		mtu += ETHER_VLAN_ENCAP_LEN;
1308 
1309 	hwtagging = (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0;
1310 
1311 	t3_mac_set_mtu(mac, mtu);
1312 	t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
1313 	t3_mac_set_address(mac, 0, p->hw_addr);
1314 	t3_init_rx_mode(&rm, p);
1315 	t3_mac_set_rx_mode(mac, &rm);
1316 }
1317 
1318 
1319 static int
1320 await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
1321 			      unsigned long n)
1322 {
1323 	int attempts = 5;
1324 
1325 	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
1326 		if (!--attempts)
1327 			return (ETIMEDOUT);
1328 		t3_os_sleep(10);
1329 	}
1330 	return 0;
1331 }
1332 
1333 static int
1334 init_tp_parity(struct adapter *adap)
1335 {
1336 	int i;
1337 	struct mbuf *m;
1338 	struct cpl_set_tcb_field *greq;
1339 	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
1340 
1341 	t3_tp_set_offload_mode(adap, 1);
1342 
1343 	for (i = 0; i < 16; i++) {
1344 		struct cpl_smt_write_req *req;
1345 
1346 		m = m_gethdr(M_WAITOK, MT_DATA);
1347 		req = mtod(m, struct cpl_smt_write_req *);
1348 		m->m_len = m->m_pkthdr.len = sizeof(*req);
1349 		memset(req, 0, sizeof(*req));
1350 		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1351 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
1352 		req->iff = i;
1353 		t3_mgmt_tx(adap, m);
1354 	}
1355 
1356 	for (i = 0; i < 2048; i++) {
1357 		struct cpl_l2t_write_req *req;
1358 
1359 		m = m_gethdr(M_WAITOK, MT_DATA);
1360 		req = mtod(m, struct cpl_l2t_write_req *);
1361 		m->m_len = m->m_pkthdr.len = sizeof(*req);
1362 		memset(req, 0, sizeof(*req));
1363 		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1364 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
1365 		req->params = htonl(V_L2T_W_IDX(i));
1366 		t3_mgmt_tx(adap, m);
1367 	}
1368 
1369 	for (i = 0; i < 2048; i++) {
1370 		struct cpl_rte_write_req *req;
1371 
1372 		m = m_gethdr(M_WAITOK, MT_DATA);
1373 		req = mtod(m, struct cpl_rte_write_req *);
1374 		m->m_len = m->m_pkthdr.len = sizeof(*req);
1375 		memset(req, 0, sizeof(*req));
1376 		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1377 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
1378 		req->l2t_idx = htonl(V_L2T_W_IDX(i));
1379 		t3_mgmt_tx(adap, m);
1380 	}
1381 
1382 	m = m_gethdr(M_WAITOK, MT_DATA);
1383 	greq = mtod(m, struct cpl_set_tcb_field *);
1384 	m->m_len = m->m_pkthdr.len = sizeof(*greq);
1385 	memset(greq, 0, sizeof(*greq));
1386 	greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1387 	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
1388 	greq->mask = htobe64(1);
1389 	t3_mgmt_tx(adap, m);
1390 
1391 	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
1392 	t3_tp_set_offload_mode(adap, 0);
1393 	return (i);
1394 }
1395 
1396 /**
1397  *	setup_rss - configure Receive Side Steering (per-queue connection demux)
1398  *	@adap: the adapter
1399  *
1400  *	Sets up RSS to distribute packets to multiple receive queues.  We
1401  *	configure the RSS CPU lookup table to distribute to the number of HW
1402  *	receive queues, and the response queue lookup table to narrow that
1403  *	down to the response queues actually configured for each port.
1404  *	We always configure the RSS mapping for two ports since the mapping
1405  *	table has plenty of entries.
1406  */
1407 static void
1408 setup_rss(adapter_t *adap)
1409 {
1410 	int i;
1411 	u_int nq[2];
1412 	uint8_t cpus[SGE_QSETS + 1];
1413 	uint16_t rspq_map[RSS_TABLE_SIZE];
1414 
1415 	for (i = 0; i < SGE_QSETS; ++i)
1416 		cpus[i] = i;
1417 	cpus[SGE_QSETS] = 0xff;
1418 
1419 	nq[0] = nq[1] = 0;
1420 	for_each_port(adap, i) {
1421 		const struct port_info *pi = adap2pinfo(adap, i);
1422 
1423 		nq[pi->tx_chan] += pi->nqsets;
1424 	}
1425 	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
1426 		rspq_map[i] = nq[0] ? i % nq[0] : 0;
1427 		rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0;
1428 	}
1429 
1430 	/* Calculate the reverse RSS map table */
1431 	for (i = 0; i < SGE_QSETS; ++i)
1432 		adap->rrss_map[i] = 0xff;
1433 	for (i = 0; i < RSS_TABLE_SIZE; ++i)
1434 		if (adap->rrss_map[rspq_map[i]] == 0xff)
1435 			adap->rrss_map[rspq_map[i]] = i;
1436 
1437 	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
1438 		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
1439 	              F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
1440 	              cpus, rspq_map);
1441 
1442 }
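/*
 * Concrete illustration of the mapping built above: on a two-port adapter
 * with 4 queue sets per port, nq[0] = nq[1] = 4, so the first half of
 * rspq_map cycles through response queues 0..3 (port 0) and the second half
 * through 4..7 (port 1).  rrss_map then records, for each response queue,
 * the first RSS table index that points back at it.
 */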
1443 static void
1444 send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
1445 			      int hi, int port)
1446 {
1447 	struct mbuf *m;
1448 	struct mngt_pktsched_wr *req;
1449 
1450 	m = m_gethdr(M_NOWAIT, MT_DATA);
1451 	if (m) {
1452 		req = mtod(m, struct mngt_pktsched_wr *);
1453 		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1454 		req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1455 		req->sched = sched;
1456 		req->idx = qidx;
1457 		req->min = lo;
1458 		req->max = hi;
1459 		req->binding = port;
1460 		m->m_len = m->m_pkthdr.len = sizeof(*req);
1461 		t3_mgmt_tx(adap, m);
1462 	}
1463 }
1464 
1465 static void
1466 bind_qsets(adapter_t *sc)
1467 {
1468 	int i, j;
1469 
1470 	for (i = 0; i < (sc)->params.nports; ++i) {
1471 		const struct port_info *pi = adap2pinfo(sc, i);
1472 
1473 		for (j = 0; j < pi->nqsets; ++j) {
1474 			send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
1475 					  -1, pi->tx_chan);
1476 
1477 		}
1478 	}
1479 }
1480 
1481 static void
1482 update_tpeeprom(struct adapter *adap)
1483 {
1484 	const struct firmware *tpeeprom;
1485 
1486 	uint32_t version;
1487 	unsigned int major, minor;
1488 	int ret, len;
1489 	char rev, name[32];
1490 
1491 	t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);
1492 
1493 	major = G_TP_VERSION_MAJOR(version);
1494 	minor = G_TP_VERSION_MINOR(version);
1495 	if (major == TP_VERSION_MAJOR  && minor == TP_VERSION_MINOR)
1496 		return;
1497 
1498 	rev = t3rev2char(adap);
1499 	snprintf(name, sizeof(name), TPEEPROM_NAME, rev);
1500 
1501 	tpeeprom = firmware_get(name);
1502 	if (tpeeprom == NULL) {
1503 		device_printf(adap->dev,
1504 			      "could not load TP EEPROM: unable to load %s\n",
1505 			      name);
1506 		return;
1507 	}
1508 
1509 	len = tpeeprom->datasize - 4;
1510 
1511 	ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
1512 	if (ret)
1513 		goto release_tpeeprom;
1514 
1515 	if (len != TP_SRAM_LEN) {
1516 		device_printf(adap->dev,
1517 			      "%s length is wrong len=%d expected=%d\n", name,
1518 			      len, TP_SRAM_LEN);
1519 		return;
1520 	}
1521 
1522 	ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
1523 	    TP_SRAM_OFFSET);
1524 
1525 	if (!ret) {
1526 		device_printf(adap->dev,
1527 			"Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
1528 			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1529 	} else
1530 		device_printf(adap->dev,
1531 			      "Protocol SRAM image update in EEPROM failed\n");
1532 
1533 release_tpeeprom:
1534 	firmware_put(tpeeprom, FIRMWARE_UNLOAD);
1535 
1536 	return;
1537 }
1538 
1539 static int
1540 update_tpsram(struct adapter *adap)
1541 {
1542 	const struct firmware *tpsram;
1543 	int ret;
1544 	char rev, name[32];
1545 
1546 	rev = t3rev2char(adap);
1547 	snprintf(name, sizeof(name), TPSRAM_NAME, rev);
1548 
1549 	update_tpeeprom(adap);
1550 
1551 	tpsram = firmware_get(name);
1552 	if (tpsram == NULL){
1553 		device_printf(adap->dev, "could not load TP SRAM\n");
1554 		return (EINVAL);
1555 	} else
1556 		device_printf(adap->dev, "updating TP SRAM\n");
1557 
1558 	ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
1559 	if (ret)
1560 		goto release_tpsram;
1561 
1562 	ret = t3_set_proto_sram(adap, tpsram->data);
1563 	if (ret)
1564 		device_printf(adap->dev, "loading protocol SRAM failed\n");
1565 
1566 release_tpsram:
1567 	firmware_put(tpsram, FIRMWARE_UNLOAD);
1568 
1569 	return ret;
1570 }
1571 
1572 /**
1573  *	cxgb_up - enable the adapter
1574  *	@adap: adapter being enabled
1575  *
1576  *	Called when the first port is enabled, this function performs the
1577  *	actions necessary to make an adapter operational, such as completing
1578  *	the initialization of HW modules, and enabling interrupts.
1579  */
1580 static int
1581 cxgb_up(struct adapter *sc)
1582 {
1583 	int err = 0;
1584 	unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS;
1585 
1586 	KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)",
1587 					   __func__, sc->open_device_map));
1588 
1589 	if ((sc->flags & FULL_INIT_DONE) == 0) {
1590 
1591 		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1592 
1593 		if ((sc->flags & FW_UPTODATE) == 0)
1594 			if ((err = upgrade_fw(sc)))
1595 				goto out;
1596 
1597 		if ((sc->flags & TPS_UPTODATE) == 0)
1598 			if ((err = update_tpsram(sc)))
1599 				goto out;
1600 
1601 		if (is_offload(sc) && nfilters != 0) {
1602 			sc->params.mc5.nservers = 0;
1603 
1604 			if (nfilters < 0)
1605 				sc->params.mc5.nfilters = mxf;
1606 			else
1607 				sc->params.mc5.nfilters = min(nfilters, mxf);
1608 		}
1609 
1610 		err = t3_init_hw(sc, 0);
1611 		if (err)
1612 			goto out;
1613 
1614 		t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1615 		t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1616 
1617 		err = setup_sge_qsets(sc);
1618 		if (err)
1619 			goto out;
1620 
1621 		alloc_filters(sc);
1622 		setup_rss(sc);
1623 
1624 		t3_add_configured_sysctls(sc);
1625 		sc->flags |= FULL_INIT_DONE;
1626 	}
1627 
1628 	t3_intr_clear(sc);
1629 	t3_sge_start(sc);
1630 	t3_intr_enable(sc);
1631 
1632 	if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
1633 	    is_offload(sc) && init_tp_parity(sc) == 0)
1634 		sc->flags |= TP_PARITY_INIT;
1635 
1636 	if (sc->flags & TP_PARITY_INIT) {
1637 		t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR);
1638 		t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff);
1639 	}
1640 
1641 	if (!(sc->flags & QUEUES_BOUND)) {
1642 		bind_qsets(sc);
1643 		setup_hw_filters(sc);
1644 		sc->flags |= QUEUES_BOUND;
1645 	}
1646 
1647 	t3_sge_reset_adapter(sc);
1648 out:
1649 	return (err);
1650 }
1651 
1652 /*
1653  * Called when the last open device is closed.  Does NOT undo all of cxgb_up's
1654  * work.  Specifically, the resources grabbed under FULL_INIT_DONE are released
1655  * during controller_detach, not here.
1656  */
1657 static void
1658 cxgb_down(struct adapter *sc)
1659 {
1660 	t3_sge_stop(sc);
1661 	t3_intr_disable(sc);
1662 }
1663 
1664 /*
1665  * if_init for cxgb ports.
1666  */
1667 static void
1668 cxgb_init(void *arg)
1669 {
1670 	struct port_info *p = arg;
1671 	struct adapter *sc = p->adapter;
1672 
1673 	ADAPTER_LOCK(sc);
1674 	cxgb_init_locked(p); /* releases adapter lock */
1675 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1676 }
1677 
1678 static int
1679 cxgb_init_locked(struct port_info *p)
1680 {
1681 	struct adapter *sc = p->adapter;
1682 	if_t ifp = p->ifp;
1683 	struct cmac *mac = &p->mac;
1684 	int i, rc = 0, may_sleep = 0, gave_up_lock = 0;
1685 
1686 	ADAPTER_LOCK_ASSERT_OWNED(sc);
1687 
1688 	while (!IS_DOOMED(p) && IS_BUSY(sc)) {
1689 		gave_up_lock = 1;
1690 		if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) {
1691 			rc = EINTR;
1692 			goto done;
1693 		}
1694 	}
1695 	if (IS_DOOMED(p)) {
1696 		rc = ENXIO;
1697 		goto done;
1698 	}
1699 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
1700 
1701 	/*
1702 	 * The code that runs during one-time adapter initialization can sleep
1703 	 * so it's important not to hold any locks across it.
1704 	 */
1705 	may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1;
1706 
1707 	if (may_sleep) {
1708 		SET_BUSY(sc);
1709 		gave_up_lock = 1;
1710 		ADAPTER_UNLOCK(sc);
1711 	}
1712 
1713 	if (sc->open_device_map == 0 && ((rc = cxgb_up(sc)) != 0))
1714 			goto done;
1715 
1716 	PORT_LOCK(p);
1717 	if (isset(&sc->open_device_map, p->port_id) &&
1718 	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
1719 		PORT_UNLOCK(p);
1720 		goto done;
1721 	}
1722 	t3_port_intr_enable(sc, p->port_id);
1723 	if (!mac->multiport)
1724 		t3_mac_init(mac);
1725 	cxgb_update_mac_settings(p);
1726 	t3_link_start(&p->phy, mac, &p->link_config);
1727 	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
1728 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
1729 	PORT_UNLOCK(p);
1730 
1731 	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1732 		struct sge_qset *qs = &sc->sge.qs[i];
1733 		struct sge_txq *txq = &qs->txq[TXQ_ETH];
1734 
1735 		callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs,
1736 				 txq->txq_watchdog.c_cpu);
1737 	}
1738 
1739 	/* all ok */
1740 	setbit(&sc->open_device_map, p->port_id);
1741 	callout_reset(&p->link_check_ch,
1742 	    p->phy.caps & SUPPORTED_LINK_IRQ ?  hz * 3 : hz / 4,
1743 	    link_check_callout, p);
1744 
1745 done:
1746 	if (may_sleep) {
1747 		ADAPTER_LOCK(sc);
1748 		KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
1749 		CLR_BUSY(sc);
1750 	}
1751 	if (gave_up_lock)
1752 		wakeup_one(&sc->flags);
1753 	ADAPTER_UNLOCK(sc);
1754 	return (rc);
1755 }
1756 
1757 static int
1758 cxgb_uninit_locked(struct port_info *p)
1759 {
1760 	struct adapter *sc = p->adapter;
1761 	int rc;
1762 
1763 	ADAPTER_LOCK_ASSERT_OWNED(sc);
1764 
1765 	while (!IS_DOOMED(p) && IS_BUSY(sc)) {
1766 		if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) {
1767 			rc = EINTR;
1768 			goto done;
1769 		}
1770 	}
1771 	if (IS_DOOMED(p)) {
1772 		rc = ENXIO;
1773 		goto done;
1774 	}
1775 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
1776 	SET_BUSY(sc);
1777 	ADAPTER_UNLOCK(sc);
1778 
1779 	rc = cxgb_uninit_synchronized(p);
1780 
1781 	ADAPTER_LOCK(sc);
1782 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
1783 	CLR_BUSY(sc);
1784 	wakeup_one(&sc->flags);
1785 done:
1786 	ADAPTER_UNLOCK(sc);
1787 	return (rc);
1788 }
1789 
1790 /*
1791  * Called on "ifconfig down", and from port_detach
1792  */
1793 static int
1794 cxgb_uninit_synchronized(struct port_info *pi)
1795 {
1796 	struct adapter *sc = pi->adapter;
1797 	if_t ifp = pi->ifp;
1798 
1799 	/*
1800 	 * taskqueue_drain may cause a deadlock if the adapter lock is held.
1801 	 */
1802 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1803 
1804 	/*
1805 	 * Clear this port's bit from the open device map, and then drain all
1806 	 * the tasks that can access/manipulate this port's port_info or ifp.
1807 	 * We disable this port's interrupts here and so the slow/ext
1808 	 * interrupt tasks won't be enqueued.  The tick task will continue to
1809 	 * be enqueued every second, but the runs after this drain will not see
1810 	 * this port in the open device map.
1811 	 *
1812 	 * A well-behaved task must take open_device_map into account and ignore
1813 	 * ports that are not open.
1814 	 */
1815 	clrbit(&sc->open_device_map, pi->port_id);
1816 	t3_port_intr_disable(sc, pi->port_id);
1817 	taskqueue_drain(sc->tq, &sc->slow_intr_task);
1818 	taskqueue_drain(sc->tq, &sc->tick_task);
1819 
1820 	callout_drain(&pi->link_check_ch);
1821 	taskqueue_drain(sc->tq, &pi->link_check_task);
1822 
1823 	PORT_LOCK(pi);
1824 	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1825 
1826 	/* disable pause frames */
1827 	t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0);
1828 
1829 	/* Reset RX FIFO HWM */
1830 	t3_set_reg_field(sc, A_XGM_RXFIFO_CFG +  pi->mac.offset,
1831 			 V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);
1832 
1833 	DELAY(100 * 1000);
1834 
1835 	/* Wait for TXFIFO empty */
1836 	t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset,
1837 			F_TXFIFO_EMPTY, 1, 20, 5);
1838 
1839 	DELAY(100 * 1000);
1840 	t3_mac_disable(&pi->mac, MAC_DIRECTION_RX);
1841 
1842 	pi->phy.ops->power_down(&pi->phy, 1);
1843 
1844 	PORT_UNLOCK(pi);
1845 
1846 	pi->link_config.link_ok = 0;
1847 	t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0);
1848 
1849 	if (sc->open_device_map == 0)
1850 		cxgb_down(pi->adapter);
1851 
1852 	return (0);
1853 }
1854 
1855 /*
1856  * Mark lro enabled or disabled in all qsets for this port
1857  */
1858 static int
1859 cxgb_set_lro(struct port_info *p, int enabled)
1860 {
1861 	int i;
1862 	struct adapter *adp = p->adapter;
1863 	struct sge_qset *q;
1864 
1865 	for (i = 0; i < p->nqsets; i++) {
1866 		q = &adp->sge.qs[p->first_qset + i];
1867 		q->lro.enabled = (enabled != 0);
1868 	}
1869 	return (0);
1870 }
1871 
1872 static int
1873 cxgb_ioctl(if_t ifp, unsigned long command, caddr_t data)
1874 {
1875 	struct port_info *p = if_getsoftc(ifp);
1876 	struct adapter *sc = p->adapter;
1877 	struct ifreq *ifr = (struct ifreq *)data;
1878 	int flags, error = 0, mtu;
1879 	uint32_t mask;
1880 
1881 	switch (command) {
1882 	case SIOCSIFMTU:
1883 		ADAPTER_LOCK(sc);
1884 		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
1885 		if (error) {
1886 fail:
1887 			ADAPTER_UNLOCK(sc);
1888 			return (error);
1889 		}
1890 
1891 		mtu = ifr->ifr_mtu;
1892 		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
1893 			error = EINVAL;
1894 		} else {
1895 			if_setmtu(ifp, mtu);
1896 			PORT_LOCK(p);
1897 			cxgb_update_mac_settings(p);
1898 			PORT_UNLOCK(p);
1899 		}
1900 		ADAPTER_UNLOCK(sc);
1901 		break;
1902 	case SIOCSIFFLAGS:
1903 		ADAPTER_LOCK(sc);
1904 		if (IS_DOOMED(p)) {
1905 			error = ENXIO;
1906 			goto fail;
1907 		}
1908 		if (if_getflags(ifp) & IFF_UP) {
1909 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1910 				flags = p->if_flags;
1911 				if (((if_getflags(ifp) ^ flags) & IFF_PROMISC) ||
1912 				    ((if_getflags(ifp) ^ flags) & IFF_ALLMULTI)) {
1913 					if (IS_BUSY(sc)) {
1914 						error = EBUSY;
1915 						goto fail;
1916 					}
1917 					PORT_LOCK(p);
1918 					cxgb_update_mac_settings(p);
1919 					PORT_UNLOCK(p);
1920 				}
1921 				ADAPTER_UNLOCK(sc);
1922 			} else
1923 				error = cxgb_init_locked(p);
1924 			p->if_flags = if_getflags(ifp);
1925 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1926 			error = cxgb_uninit_locked(p);
1927 		else
1928 			ADAPTER_UNLOCK(sc);
1929 
1930 		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1931 		break;
1932 	case SIOCADDMULTI:
1933 	case SIOCDELMULTI:
1934 		ADAPTER_LOCK(sc);
1935 		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
1936 		if (error)
1937 			goto fail;
1938 
1939 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1940 			PORT_LOCK(p);
1941 			cxgb_update_mac_settings(p);
1942 			PORT_UNLOCK(p);
1943 		}
1944 		ADAPTER_UNLOCK(sc);
1945 
1946 		break;
1947 	case SIOCSIFCAP:
1948 		ADAPTER_LOCK(sc);
1949 		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
1950 		if (error)
1951 			goto fail;
1952 
1953 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
1954 		if (mask & IFCAP_TXCSUM) {
1955 			if_togglecapenable(ifp, IFCAP_TXCSUM);
1956 			if_togglehwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP);
1957 
1958 			if (IFCAP_TSO4 & if_getcapenable(ifp) &&
1959 			    !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
1960 				mask &= ~IFCAP_TSO4;
1961 				if_setcapenablebit(ifp, 0, IFCAP_TSO4);
1962 				if_printf(ifp,
1963 				    "tso4 disabled due to -txcsum.\n");
1964 			}
1965 		}
1966 		if (mask & IFCAP_TXCSUM_IPV6) {
1967 			if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
1968 			if_togglehwassist(ifp, CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1969 
1970 			if (IFCAP_TSO6 & if_getcapenable(ifp) &&
1971 			    !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
1972 				mask &= ~IFCAP_TSO6;
1973 				if_setcapenablebit(ifp, 0, IFCAP_TSO6);
1974 				if_printf(ifp,
1975 				    "tso6 disabled due to -txcsum6.\n");
1976 			}
1977 		}
1978 		if (mask & IFCAP_RXCSUM)
1979 			if_togglecapenable(ifp, IFCAP_RXCSUM);
1980 		if (mask & IFCAP_RXCSUM_IPV6)
1981 			if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);
1982 
1983 		/*
1984 		 * Note that we leave CSUM_TSO alone (it is always set).  The
1985 		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1986 		 * sending a TSO request our way, so it's sufficient to toggle
1987 		 * IFCAP_TSOx only.
1988 		 */
1989 		if (mask & IFCAP_TSO4) {
1990 			if (!(IFCAP_TSO4 & if_getcapenable(ifp)) &&
1991 			    !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
1992 				if_printf(ifp, "enable txcsum first.\n");
1993 				error = EAGAIN;
1994 				goto fail;
1995 			}
1996 			if_togglecapenable(ifp, IFCAP_TSO4);
1997 		}
1998 		if (mask & IFCAP_TSO6) {
1999 			if (!(IFCAP_TSO6 & if_getcapenable(ifp)) &&
2000 			    !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
2001 				if_printf(ifp, "enable txcsum6 first.\n");
2002 				error = EAGAIN;
2003 				goto fail;
2004 			}
2005 			if_togglecapenable(ifp, IFCAP_TSO6);
2006 		}
2007 		if (mask & IFCAP_LRO) {
2008 			if_togglecapenable(ifp, IFCAP_LRO);
2009 
2010 			/* Safe to do this even if cxgb_up not called yet */
2011 			cxgb_set_lro(p, if_getcapenable(ifp) & IFCAP_LRO);
2012 		}
2013 #ifdef TCP_OFFLOAD
2014 		if (mask & IFCAP_TOE4) {
2015 			int enable = (if_getcapenable(ifp) ^ mask) & IFCAP_TOE4;
2016 
2017 			error = toe_capability(p, enable);
2018 			if (error == 0)
2019 				if_togglecapenable(ifp, mask);
2020 		}
2021 #endif
2022 		if (mask & IFCAP_VLAN_HWTAGGING) {
2023 			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
2024 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2025 				PORT_LOCK(p);
2026 				cxgb_update_mac_settings(p);
2027 				PORT_UNLOCK(p);
2028 			}
2029 		}
2030 		if (mask & IFCAP_VLAN_MTU) {
2031 			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
2032 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2033 				PORT_LOCK(p);
2034 				cxgb_update_mac_settings(p);
2035 				PORT_UNLOCK(p);
2036 			}
2037 		}
2038 		if (mask & IFCAP_VLAN_HWTSO)
2039 			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
2040 		if (mask & IFCAP_VLAN_HWCSUM)
2041 			if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
2042 
2043 #ifdef VLAN_CAPABILITIES
2044 		VLAN_CAPABILITIES(ifp);
2045 #endif
2046 		ADAPTER_UNLOCK(sc);
2047 		break;
2048 	case SIOCSIFMEDIA:
2049 	case SIOCGIFMEDIA:
2050 		error = ifmedia_ioctl(ifp, ifr, &p->media, command);
2051 		break;
2052 	default:
2053 		error = ether_ioctl(ifp, command, data);
2054 	}
2055 
2056 	return (error);
2057 }
2058 
2059 static int
2060 cxgb_media_change(if_t ifp)
2061 {
2062 	return (EOPNOTSUPP);
2063 }
2064 
2065 /*
2066  * Translates phy->modtype to the correct Ethernet media subtype.
2067  */
2068 static int
2069 cxgb_ifm_type(int mod)
2070 {
2071 	switch (mod) {
2072 	case phy_modtype_sr:
2073 		return (IFM_10G_SR);
2074 	case phy_modtype_lr:
2075 		return (IFM_10G_LR);
2076 	case phy_modtype_lrm:
2077 		return (IFM_10G_LRM);
2078 	case phy_modtype_twinax:
2079 		return (IFM_10G_TWINAX);
2080 	case phy_modtype_twinax_long:
2081 		return (IFM_10G_TWINAX_LONG);
2082 	case phy_modtype_none:
2083 		return (IFM_NONE);
2084 	case phy_modtype_unknown:
2085 		return (IFM_UNKNOWN);
2086 	}
2087 
2088 	KASSERT(0, ("%s: modtype %d unknown", __func__, mod));
2089 	return (IFM_UNKNOWN);
2090 }
2091 
2092 /*
2093  * Rebuilds the ifmedia list for this port, and sets the current media.
2094  */
2095 static void
2096 cxgb_build_medialist(struct port_info *p)
2097 {
2098 	struct cphy *phy = &p->phy;
2099 	struct ifmedia *media = &p->media;
2100 	int mod = phy->modtype;
2101 	int m = IFM_ETHER | IFM_FDX;
2102 
2103 	PORT_LOCK(p);
2104 
2105 	ifmedia_removeall(media);
2106 	if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) {
2107 		/* Copper (RJ45) */
2108 
2109 		if (phy->caps & SUPPORTED_10000baseT_Full)
2110 			ifmedia_add(media, m | IFM_10G_T, mod, NULL);
2111 
2112 		if (phy->caps & SUPPORTED_1000baseT_Full)
2113 			ifmedia_add(media, m | IFM_1000_T, mod, NULL);
2114 
2115 		if (phy->caps & SUPPORTED_100baseT_Full)
2116 			ifmedia_add(media, m | IFM_100_TX, mod, NULL);
2117 
2118 		if (phy->caps & SUPPORTED_10baseT_Full)
2119 			ifmedia_add(media, m | IFM_10_T, mod, NULL);
2120 
2121 		ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL);
2122 		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2123 
2124 	} else if (phy->caps & SUPPORTED_TP) {
2125 		/* Copper (CX4) */
2126 
2127 		KASSERT(phy->caps & SUPPORTED_10000baseT_Full,
2128 			("%s: unexpected cap 0x%x", __func__, phy->caps));
2129 
2130 		ifmedia_add(media, m | IFM_10G_CX4, mod, NULL);
2131 		ifmedia_set(media, m | IFM_10G_CX4);
2132 
2133 	} else if (phy->caps & SUPPORTED_FIBRE &&
2134 		   phy->caps & SUPPORTED_10000baseT_Full) {
2135 		/* 10G optical (but includes SFP+ twinax) */
2136 
2137 		m |= cxgb_ifm_type(mod);
2138 		if (IFM_SUBTYPE(m) == IFM_NONE)
2139 			m &= ~IFM_FDX;
2140 
2141 		ifmedia_add(media, m, mod, NULL);
2142 		ifmedia_set(media, m);
2143 
2144 	} else if (phy->caps & SUPPORTED_FIBRE &&
2145 		   phy->caps & SUPPORTED_1000baseT_Full) {
2146 		/* 1G optical */
2147 
2148 		/* XXX: Lie and claim to be SX, could actually be any 1G-X */
2149 		ifmedia_add(media, m | IFM_1000_SX, mod, NULL);
2150 		ifmedia_set(media, m | IFM_1000_SX);
2151 
2152 	} else {
2153 		KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__,
2154 			    phy->caps));
2155 	}
2156 
2157 	PORT_UNLOCK(p);
2158 }
2159 
2160 static void
2161 cxgb_media_status(if_t ifp, struct ifmediareq *ifmr)
2162 {
2163 	struct port_info *p = if_getsoftc(ifp);
2164 	struct ifmedia_entry *cur = p->media.ifm_cur;
2165 	int speed = p->link_config.speed;
2166 
2167 	if (cur->ifm_data != p->phy.modtype) {
2168 		cxgb_build_medialist(p);
2169 		cur = p->media.ifm_cur;
2170 	}
2171 
2172 	ifmr->ifm_status = IFM_AVALID;
2173 	if (!p->link_config.link_ok)
2174 		return;
2175 
2176 	ifmr->ifm_status |= IFM_ACTIVE;
2177 
2178 	/*
2179 	 * active and current will differ iff current media is autoselect.  That
2180 	 * can happen only for copper RJ45.
2181 	 */
2182 	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
2183 		return;
2184 	KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
2185 		("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));
2186 
2187 	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
2188 	if (speed == SPEED_10000)
2189 		ifmr->ifm_active |= IFM_10G_T;
2190 	else if (speed == SPEED_1000)
2191 		ifmr->ifm_active |= IFM_1000_T;
2192 	else if (speed == SPEED_100)
2193 		ifmr->ifm_active |= IFM_100_TX;
2194 	else if (speed == SPEED_10)
2195 		ifmr->ifm_active |= IFM_10_T;
2196 	else
2197 		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
2198 			    speed));
2199 }
2200 
2201 static uint64_t
2202 cxgb_get_counter(if_t ifp, ift_counter c)
2203 {
2204 	struct port_info *pi = if_getsoftc(ifp);
2205 	struct adapter *sc = pi->adapter;
2206 	struct cmac *mac = &pi->mac;
2207 	struct mac_stats *mstats = &mac->stats;
2208 
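	/* Make sure the MAC counters used below are reasonably fresh. */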
2209 	cxgb_refresh_stats(pi);
2210 
2211 	switch (c) {
2212 	case IFCOUNTER_IPACKETS:
2213 		return (mstats->rx_frames);
2214 
2215 	case IFCOUNTER_IERRORS:
2216 		return (mstats->rx_jabber + mstats->rx_data_errs +
2217 		    mstats->rx_sequence_errs + mstats->rx_runt +
2218 		    mstats->rx_too_long + mstats->rx_mac_internal_errs +
2219 		    mstats->rx_short + mstats->rx_fcs_errs);
2220 
2221 	case IFCOUNTER_OPACKETS:
2222 		return (mstats->tx_frames);
2223 
2224 	case IFCOUNTER_OERRORS:
2225 		return (mstats->tx_excess_collisions + mstats->tx_underrun +
2226 		    mstats->tx_len_errs + mstats->tx_mac_internal_errs +
2227 		    mstats->tx_excess_deferral + mstats->tx_fcs_errs);
2228 
2229 	case IFCOUNTER_COLLISIONS:
2230 		return (mstats->tx_total_collisions);
2231 
2232 	case IFCOUNTER_IBYTES:
2233 		return (mstats->rx_octets);
2234 
2235 	case IFCOUNTER_OBYTES:
2236 		return (mstats->tx_octets);
2237 
2238 	case IFCOUNTER_IMCASTS:
2239 		return (mstats->rx_mcast_frames);
2240 
2241 	case IFCOUNTER_OMCASTS:
2242 		return (mstats->tx_mcast_frames);
2243 
2244 	case IFCOUNTER_IQDROPS:
2245 		return (mstats->rx_cong_drops);
2246 
2247 	case IFCOUNTER_OQDROPS: {
2248 		int i;
2249 		uint64_t drops;
2250 
2251 		drops = 0;
2252 		if (sc->flags & FULL_INIT_DONE) {
2253 			for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
2254 				drops += sc->sge.qs[i].txq[TXQ_ETH].txq_mr->br_drops;
2255 		}
2256 
2257 		return (drops);
2258 
2259 	}
2260 
2261 	default:
2262 		return (if_get_counter_default(ifp, c));
2263 	}
2264 }
2265 
2266 static void
2267 cxgb_async_intr(void *data)
2268 {
2269 	adapter_t *sc = data;
2270 
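	/*
	 * Mask PL interrupts (the read-back flushes the posted write) and
	 * defer the real work to the slow interrupt task.
	 */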
2271 	t3_write_reg(sc, A_PL_INT_ENABLE0, 0);
2272 	(void) t3_read_reg(sc, A_PL_INT_ENABLE0);
2273 	taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
2274 }
2275 
2276 static void
2277 link_check_callout(void *arg)
2278 {
2279 	struct port_info *pi = arg;
2280 	struct adapter *sc = pi->adapter;
2281 
2282 	if (!isset(&sc->open_device_map, pi->port_id))
2283 		return;
2284 
2285 	taskqueue_enqueue(sc->tq, &pi->link_check_task);
2286 }
2287 
2288 static void
2289 check_link_status(void *arg, int pending)
2290 {
2291 	struct port_info *pi = arg;
2292 	struct adapter *sc = pi->adapter;
2293 
2294 	if (!isset(&sc->open_device_map, pi->port_id))
2295 		return;
2296 
2297 	t3_link_changed(sc, pi->port_id);
2298 
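	/*
	 * Keep polling while the link is faulted or down, or if the PHY
	 * cannot interrupt on link changes.
	 */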
2299 	if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ) ||
2300 	    pi->link_config.link_ok == 0)
2301 		callout_reset(&pi->link_check_ch, hz, link_check_callout, pi);
2302 }
2303 
2304 void
2305 t3_os_link_intr(struct port_info *pi)
2306 {
2307 	/*
2308 	 * Schedule a link check in the near future.  If the link is flapping
2309 	 * rapidly we'll keep resetting the callout and delaying the check until
2310 	 * things stabilize a bit.
2311 	 */
2312 	callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi);
2313 }
2314 
2315 static void
2316 check_t3b2_mac(struct adapter *sc)
2317 {
2318 	int i;
2319 
2320 	if (sc->flags & CXGB_SHUTDOWN)
2321 		return;
2322 
2323 	for_each_port(sc, i) {
2324 		struct port_info *p = &sc->port[i];
2325 		int status;
2326 #ifdef INVARIANTS
2327 		if_t ifp = p->ifp;
2328 #endif
2329 
2330 		if (!isset(&sc->open_device_map, p->port_id) || p->link_fault ||
2331 		    !p->link_config.link_ok)
2332 			continue;
2333 
2334 		KASSERT(if_getdrvflags(ifp) & IFF_DRV_RUNNING,
2335 			("%s: state mismatch (drv_flags %x, device_map %x)",
2336 			 __func__, if_getdrvflags(ifp), sc->open_device_map));
2337 
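		/*
		 * The watchdog returns 1 when it only had to toggle the MAC
		 * TX path, and 2 when the MAC was reset and has to be
		 * reprogrammed and re-enabled.
		 */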
2338 		PORT_LOCK(p);
2339 		status = t3b2_mac_watchdog_task(&p->mac);
2340 		if (status == 1)
2341 			p->mac.stats.num_toggled++;
2342 		else if (status == 2) {
2343 			struct cmac *mac = &p->mac;
2344 
2345 			cxgb_update_mac_settings(p);
2346 			t3_link_start(&p->phy, mac, &p->link_config);
2347 			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2348 			t3_port_intr_enable(sc, p->port_id);
2349 			p->mac.stats.num_resets++;
2350 		}
2351 		PORT_UNLOCK(p);
2352 	}
2353 }
2354 
2355 static void
2356 cxgb_tick(void *arg)
2357 {
2358 	adapter_t *sc = (adapter_t *)arg;
2359 
2360 	if (sc->flags & CXGB_SHUTDOWN)
2361 		return;
2362 
2363 	taskqueue_enqueue(sc->tq, &sc->tick_task);
2364 	callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
2365 }
2366 
2367 void
2368 cxgb_refresh_stats(struct port_info *pi)
2369 {
2370 	struct timeval tv;
2371 	const struct timeval interval = {0, 250000};    /* 250ms */
2372 
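	/* Skip the hardware read if stats were refreshed in the last 250ms. */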
2373 	getmicrotime(&tv);
2374 	timevalsub(&tv, &interval);
2375 	if (timevalcmp(&tv, &pi->last_refreshed, <))
2376 		return;
2377 
2378 	PORT_LOCK(pi);
2379 	t3_mac_update_stats(&pi->mac);
2380 	PORT_UNLOCK(pi);
2381 	getmicrotime(&pi->last_refreshed);
2382 }
2383 
2384 static void
2385 cxgb_tick_handler(void *arg, int count)
2386 {
2387 	adapter_t *sc = (adapter_t *)arg;
2388 	const struct adapter_params *p = &sc->params;
2389 	int i;
2390 	uint32_t cause, reset;
2391 
2392 	if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE))
2393 		return;
2394 
2395 	if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map)
2396 		check_t3b2_mac(sc);
2397 
2398 	cause = t3_read_reg(sc, A_SG_INT_CAUSE) & (F_RSPQSTARVE | F_FLEMPTY);
2399 	if (cause) {
2400 		struct sge_qset *qs = &sc->sge.qs[0];
2401 		uint32_t mask, v;
2402 
2403 		v = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) & ~0xff00;
2404 
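		/*
		 * The low SGE_QSETS bits of the status register flag starved
		 * response queues.  The next SGE_QSETS bits (RSPQXDISABLED,
		 * masked off above) are skipped, and the remaining two bits
		 * per qset flag empty free lists.
		 */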
2405 		mask = 1;
2406 		for (i = 0; i < SGE_QSETS; i++) {
2407 			if (v & mask)
2408 				qs[i].rspq.starved++;
2409 			mask <<= 1;
2410 		}
2411 
2412 		mask <<= SGE_QSETS; /* skip RSPQXDISABLED */
2413 
2414 		for (i = 0; i < SGE_QSETS * 2; i++) {
2415 			if (v & mask) {
2416 				qs[i / 2].fl[i % 2].empty++;
2417 			}
2418 			mask <<= 1;
2419 		}
2420 
2421 		/* clear */
2422 		t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, v);
2423 		t3_write_reg(sc, A_SG_INT_CAUSE, cause);
2424 	}
2425 
2426 	for (i = 0; i < sc->params.nports; i++) {
2427 		struct port_info *pi = &sc->port[i];
2428 		struct cmac *mac = &pi->mac;
2429 
2430 		if (!isset(&sc->open_device_map, pi->port_id))
2431 			continue;
2432 
2433 		cxgb_refresh_stats(pi);
2434 
2435 		if (mac->multiport)
2436 			continue;
2437 
2438 		/* Count rx fifo overflows, once per second */
2439 		cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset);
2440 		reset = 0;
2441 		if (cause & F_RXFIFO_OVERFLOW) {
2442 			mac->stats.rx_fifo_ovfl++;
2443 			reset |= F_RXFIFO_OVERFLOW;
2444 		}
2445 		t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset);
2446 	}
2447 }
2448 
2449 static void
2450 touch_bars(device_t dev)
2451 {
2452 	/*
2453 	 * Don't enable yet
2454 	 */
2455 #if !defined(__LP64__) && 0
2456 	u32 v;
2457 
2458 	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
2459 	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
2460 	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
2461 	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
2462 	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
2463 	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
2464 #endif
2465 }
2466 
2467 static int
2468 set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
2469 {
2470 	uint8_t *buf;
2471 	int err = 0;
2472 	u32 aligned_offset, aligned_len, *p;
2473 	struct adapter *adapter = pi->adapter;
2474 
2475 
2476 	aligned_offset = offset & ~3;
2477 	aligned_len = (len + (offset & 3) + 3) & ~3;
2478 
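	/*
	 * If the write isn't 32-bit aligned, read back the first and last
	 * words so the bytes outside the requested range are preserved.
	 */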
2479 	if (aligned_offset != offset || aligned_len != len) {
2480 		buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
2481 		if (!buf)
2482 			return (ENOMEM);
2483 		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
2484 		if (!err && aligned_len > 4)
2485 			err = t3_seeprom_read(adapter,
2486 					      aligned_offset + aligned_len - 4,
2487 					      (u32 *)&buf[aligned_len - 4]);
2488 		if (err)
2489 			goto out;
2490 		memcpy(buf + (offset & 3), data, len);
2491 	} else
2492 		buf = (uint8_t *)(uintptr_t)data;
2493 
2494 	err = t3_seeprom_wp(adapter, 0);
2495 	if (err)
2496 		goto out;
2497 
2498 	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2499 		err = t3_seeprom_write(adapter, aligned_offset, *p);
2500 		aligned_offset += 4;
2501 	}
2502 
2503 	if (!err)
2504 		err = t3_seeprom_wp(adapter, 1);
2505 out:
2506 	if (buf != data)
2507 		free(buf, M_DEVBUF);
2508 	return err;
2509 }
2510 
2511 
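/*
 * A negative value means "not specified"; anything else must lie in [lo, hi].
 */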
2512 static int
2513 in_range(int val, int lo, int hi)
2514 {
2515 	return val < 0 || (val <= hi && val >= lo);
2516 }
2517 
2518 static int
2519 cxgb_extension_open(struct cdev *dev, int flags, int fmp, struct thread *td)
2520 {
2521        return (0);
2522 }
2523 
2524 static int
2525 cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2526 {
2527        return (0);
2528 }
2529 
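/*
 * Privileged ioctls on the port's control device node.  As a rough,
 * illustrative sketch only (the device path below is an assumption, not
 * taken from this file), a userland tool might read a register like this:
 *
 *	struct ch_reg r = { .addr = A_SG_INT_CAUSE };	// must be 4-byte aligned
 *	int fd = open("/dev/cxgb0", O_RDWR);		// assumed node name
 *	if (fd >= 0 && ioctl(fd, CHELSIO_GETREG, &r) == 0)
 *		printf("0x%x\n", r.val);
 *
 * All of these commands require PRIV_DRIVER (or superuser) privileges.
 */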
2530 static int
2531 cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
2532     int fflag, struct thread *td)
2533 {
2534 	int mmd, error = 0;
2535 	struct port_info *pi = dev->si_drv1;
2536 	adapter_t *sc = pi->adapter;
2537 
2538 #ifdef PRIV_SUPPORTED
2539 	if (priv_check(td, PRIV_DRIVER)) {
2540 		if (cxgb_debug)
2541 			printf("user does not have access to privileged ioctls\n");
2542 		return (EPERM);
2543 	}
2544 #else
2545 	if (suser(td)) {
2546 		if (cxgb_debug)
2547 			printf("user does not have access to privileged ioctls\n");
2548 		return (EPERM);
2549 	}
2550 #endif
2551 
2552 	switch (cmd) {
2553 	case CHELSIO_GET_MIIREG: {
2554 		uint32_t val;
2555 		struct cphy *phy = &pi->phy;
2556 		struct ch_mii_data *mid = (struct ch_mii_data *)data;
2557 
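		/*
		 * 10G PHYs use clause-45 addressing: the MMD is carried in
		 * the upper byte of phy_id (defaulting to the PCS device),
		 * while 1G PHYs use plain clause-22 5-bit register numbers.
		 */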
2558 		if (!phy->mdio_read)
2559 			return (EOPNOTSUPP);
2560 		if (is_10G(sc)) {
2561 			mmd = mid->phy_id >> 8;
2562 			if (!mmd)
2563 				mmd = MDIO_DEV_PCS;
2564 			else if (mmd > MDIO_DEV_VEND2)
2565 				return (EINVAL);
2566 
2567 			error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
2568 					     mid->reg_num, &val);
2569 		} else
2570 		        error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
2571 					     mid->reg_num & 0x1f, &val);
2572 		if (error == 0)
2573 			mid->val_out = val;
2574 		break;
2575 	}
2576 	case CHELSIO_SET_MIIREG: {
2577 		struct cphy *phy = &pi->phy;
2578 		struct ch_mii_data *mid = (struct ch_mii_data *)data;
2579 
2580 		if (!phy->mdio_write)
2581 			return (EOPNOTSUPP);
2582 		if (is_10G(sc)) {
2583 			mmd = mid->phy_id >> 8;
2584 			if (!mmd)
2585 				mmd = MDIO_DEV_PCS;
2586 			else if (mmd > MDIO_DEV_VEND2)
2587 				return (EINVAL);
2588 
2589 			error = phy->mdio_write(sc, mid->phy_id & 0x1f,
2590 					      mmd, mid->reg_num, mid->val_in);
2591 		} else
2592 			error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
2593 					      mid->reg_num & 0x1f,
2594 					      mid->val_in);
2595 		break;
2596 	}
2597 	case CHELSIO_SETREG: {
2598 		struct ch_reg *edata = (struct ch_reg *)data;
2599 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2600 			return (EFAULT);
2601 		t3_write_reg(sc, edata->addr, edata->val);
2602 		break;
2603 	}
2604 	case CHELSIO_GETREG: {
2605 		struct ch_reg *edata = (struct ch_reg *)data;
2606 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2607 			return (EFAULT);
2608 		edata->val = t3_read_reg(sc, edata->addr);
2609 		break;
2610 	}
2611 	case CHELSIO_GET_SGE_CONTEXT: {
2612 		struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
2613 		mtx_lock_spin(&sc->sge.reg_lock);
2614 		switch (ecntxt->cntxt_type) {
2615 		case CNTXT_TYPE_EGRESS:
2616 			error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
2617 			    ecntxt->data);
2618 			break;
2619 		case CNTXT_TYPE_FL:
2620 			error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
2621 			    ecntxt->data);
2622 			break;
2623 		case CNTXT_TYPE_RSP:
2624 			error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
2625 			    ecntxt->data);
2626 			break;
2627 		case CNTXT_TYPE_CQ:
2628 			error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
2629 			    ecntxt->data);
2630 			break;
2631 		default:
2632 			error = EINVAL;
2633 			break;
2634 		}
2635 		mtx_unlock_spin(&sc->sge.reg_lock);
2636 		break;
2637 	}
2638 	case CHELSIO_GET_SGE_DESC: {
2639 		struct ch_desc *edesc = (struct ch_desc *)data;
2640 		int ret;
2641 		if (edesc->queue_num >= SGE_QSETS * 6)
2642 			return (EINVAL);
2643 		ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
2644 		    edesc->queue_num % 6, edesc->idx, edesc->data);
2645 		if (ret < 0)
2646 			return (EINVAL);
2647 		edesc->size = ret;
2648 		break;
2649 	}
2650 	case CHELSIO_GET_QSET_PARAMS: {
2651 		struct qset_params *q;
2652 		struct ch_qset_params *t = (struct ch_qset_params *)data;
2653 		int q1 = pi->first_qset;
2654 		int nqsets = pi->nqsets;
2655 		int i;
2656 
2657 		if (t->qset_idx >= nqsets)
2658 			return EINVAL;
2659 
2660 		i = q1 + t->qset_idx;
2661 		q = &sc->params.sge.qset[i];
2662 		t->rspq_size   = q->rspq_size;
2663 		t->txq_size[0] = q->txq_size[0];
2664 		t->txq_size[1] = q->txq_size[1];
2665 		t->txq_size[2] = q->txq_size[2];
2666 		t->fl_size[0]  = q->fl_size;
2667 		t->fl_size[1]  = q->jumbo_size;
2668 		t->polling     = q->polling;
2669 		t->lro         = q->lro;
2670 		t->intr_lat    = q->coalesce_usecs;
2671 		t->cong_thres  = q->cong_thres;
2672 		t->qnum        = i;
2673 
2674 		if ((sc->flags & FULL_INIT_DONE) == 0)
2675 			t->vector = 0;
2676 		else if (sc->flags & USING_MSIX)
2677 			t->vector = rman_get_start(sc->msix_irq_res[i]);
2678 		else
2679 			t->vector = rman_get_start(sc->irq_res);
2680 
2681 		break;
2682 	}
2683 	case CHELSIO_GET_QSET_NUM: {
2684 		struct ch_reg *edata = (struct ch_reg *)data;
2685 		edata->val = pi->nqsets;
2686 		break;
2687 	}
2688 	case CHELSIO_LOAD_FW: {
2689 		uint8_t *fw_data;
2690 		uint32_t vers;
2691 		struct ch_mem_range *t = (struct ch_mem_range *)data;
2692 
2693 		/*
2694 		 * You're allowed to load firmware only before FULL_INIT_DONE
2695 		 *
2696 		 * FW_UPTODATE is also set so the rest of the initialization
2697 		 * will not overwrite what was loaded here.  This gives you the
2698 		 * flexibility to load any firmware (and maybe shoot yourself in
2699 		 * the foot).
2700 		 */
2701 
2702 		ADAPTER_LOCK(sc);
2703 		if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
2704 			ADAPTER_UNLOCK(sc);
2705 			return (EBUSY);
2706 		}
2707 
2708 		fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
2709 		if (!fw_data)
2710 			error = ENOMEM;
2711 		else
2712 			error = copyin(t->buf, fw_data, t->len);
2713 
2714 		if (!error)
2715 			error = -t3_load_fw(sc, fw_data, t->len);
2716 
2717 		if (t3_get_fw_version(sc, &vers) == 0) {
2718 			snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
2719 			    "%d.%d.%d", G_FW_VERSION_MAJOR(vers),
2720 			    G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers));
2721 		}
2722 
2723 		if (!error)
2724 			sc->flags |= FW_UPTODATE;
2725 
2726 		free(fw_data, M_DEVBUF);
2727 		ADAPTER_UNLOCK(sc);
2728 		break;
2729 	}
2730 	case CHELSIO_LOAD_BOOT: {
2731 		uint8_t *boot_data;
2732 		struct ch_mem_range *t = (struct ch_mem_range *)data;
2733 
2734 		boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
2735 		if (!boot_data)
2736 			return ENOMEM;
2737 
2738 		error = copyin(t->buf, boot_data, t->len);
2739 		if (!error)
2740 			error = -t3_load_boot(sc, boot_data, t->len);
2741 
2742 		free(boot_data, M_DEVBUF);
2743 		break;
2744 	}
2745 	case CHELSIO_GET_PM: {
2746 		struct ch_pm *m = (struct ch_pm *)data;
2747 		struct tp_params *p = &sc->params.tp;
2748 
2749 		if (!is_offload(sc))
2750 			return (EOPNOTSUPP);
2751 
2752 		m->tx_pg_sz = p->tx_pg_size;
2753 		m->tx_num_pg = p->tx_num_pgs;
2754 		m->rx_pg_sz  = p->rx_pg_size;
2755 		m->rx_num_pg = p->rx_num_pgs;
2756 		m->pm_total  = p->pmtx_size + p->chan_rx_size * p->nchan;
2757 
2758 		break;
2759 	}
2760 	case CHELSIO_SET_PM: {
2761 		struct ch_pm *m = (struct ch_pm *)data;
2762 		struct tp_params *p = &sc->params.tp;
2763 
2764 		if (!is_offload(sc))
2765 			return (EOPNOTSUPP);
2766 		if (sc->flags & FULL_INIT_DONE)
2767 			return (EBUSY);
2768 
2769 		if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
2770 		    !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
2771 			return (EINVAL);	/* not power of 2 */
2772 		if (!(m->rx_pg_sz & 0x14000))
2773 			return (EINVAL);	/* not 16KB or 64KB */
2774 		if (!(m->tx_pg_sz & 0x1554000))
2775 			return (EINVAL);
2776 		if (m->tx_num_pg == -1)
2777 			m->tx_num_pg = p->tx_num_pgs;
2778 		if (m->rx_num_pg == -1)
2779 			m->rx_num_pg = p->rx_num_pgs;
2780 		if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
2781 			return (EINVAL);
2782 		if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
2783 		    m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
2784 			return (EINVAL);
2785 
2786 		p->rx_pg_size = m->rx_pg_sz;
2787 		p->tx_pg_size = m->tx_pg_sz;
2788 		p->rx_num_pgs = m->rx_num_pg;
2789 		p->tx_num_pgs = m->tx_num_pg;
2790 		break;
2791 	}
2792 	case CHELSIO_SETMTUTAB: {
2793 		struct ch_mtus *m = (struct ch_mtus *)data;
2794 		int i;
2795 
2796 		if (!is_offload(sc))
2797 			return (EOPNOTSUPP);
2798 		if (offload_running(sc))
2799 			return (EBUSY);
2800 		if (m->nmtus != NMTUS)
2801 			return (EINVAL);
2802 		if (m->mtus[0] < 81)         /* accommodate SACK */
2803 			return (EINVAL);
2804 
2805 		/*
2806 		 * MTUs must be in ascending order
2807 		 */
2808 		for (i = 1; i < NMTUS; ++i)
2809 			if (m->mtus[i] < m->mtus[i - 1])
2810 				return (EINVAL);
2811 
2812 		memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
2813 		break;
2814 	}
2815 	case CHELSIO_GETMTUTAB: {
2816 		struct ch_mtus *m = (struct ch_mtus *)data;
2817 
2818 		if (!is_offload(sc))
2819 			return (EOPNOTSUPP);
2820 
2821 		memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
2822 		m->nmtus = NMTUS;
2823 		break;
2824 	}
2825 	case CHELSIO_GET_MEM: {
2826 		struct ch_mem_range *t = (struct ch_mem_range *)data;
2827 		struct mc7 *mem;
2828 		uint8_t *useraddr;
2829 		u64 buf[32];
2830 
2831 		/*
2832 		 * Use these to avoid modifying len/addr in the return
2833 		 * struct
2834 		 */
2835 		uint32_t len = t->len, addr = t->addr;
2836 
2837 		if (!is_offload(sc))
2838 			return (EOPNOTSUPP);
2839 		if (!(sc->flags & FULL_INIT_DONE))
2840 			return (EIO);         /* need the memory controllers */
2841 		if ((addr & 0x7) || (len & 0x7))
2842 			return (EINVAL);
2843 		if (t->mem_id == MEM_CM)
2844 			mem = &sc->cm;
2845 		else if (t->mem_id == MEM_PMRX)
2846 			mem = &sc->pmrx;
2847 		else if (t->mem_id == MEM_PMTX)
2848 			mem = &sc->pmtx;
2849 		else
2850 			return (EINVAL);
2851 
2852 		/*
2853 		 * Version scheme:
2854 		 * bits 0..9: chip version
2855 		 * bits 10..15: chip revision
2856 		 */
2857 		t->version = 3 | (sc->params.rev << 10);
2858 
2859 		/*
2860 		 * Read 256 bytes at a time as len can be large and we don't
2861 		 * want to use huge intermediate buffers.
2862 		 */
2863 		useraddr = (uint8_t *)t->buf;
2864 		while (len) {
2865 			unsigned int chunk = min(len, sizeof(buf));
2866 
2867 			error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf);
2868 			if (error)
2869 				return (-error);
2870 			if (copyout(buf, useraddr, chunk))
2871 				return (EFAULT);
2872 			useraddr += chunk;
2873 			addr += chunk;
2874 			len -= chunk;
2875 		}
2876 		break;
2877 	}
2878 	case CHELSIO_READ_TCAM_WORD: {
2879 		struct ch_tcam_word *t = (struct ch_tcam_word *)data;
2880 
2881 		if (!is_offload(sc))
2882 			return (EOPNOTSUPP);
2883 		if (!(sc->flags & FULL_INIT_DONE))
2884 			return (EIO);         /* need MC5 */
2885 		return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
2886 		break;
2887 	}
2888 	case CHELSIO_SET_TRACE_FILTER: {
2889 		struct ch_trace *t = (struct ch_trace *)data;
2890 		const struct trace_params *tp;
2891 
2892 		tp = (const struct trace_params *)&t->sip;
2893 		if (t->config_tx)
2894 			t3_config_trace_filter(sc, tp, 0, t->invert_match,
2895 					       t->trace_tx);
2896 		if (t->config_rx)
2897 			t3_config_trace_filter(sc, tp, 1, t->invert_match,
2898 					       t->trace_rx);
2899 		break;
2900 	}
2901 	case CHELSIO_SET_PKTSCHED: {
2902 		struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
2903 		if (sc->open_device_map == 0)
2904 			return (EAGAIN);
2905 		send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
2906 		    p->binding);
2907 		break;
2908 	}
2909 	case CHELSIO_IFCONF_GETREGS: {
2910 		struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data;
2911 		int reglen = cxgb_get_regs_len();
2912 		uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT);
2913 		if (buf == NULL) {
2914 			return (ENOMEM);
2915 		}
2916 		if (regs->len > reglen)
2917 			regs->len = reglen;
2918 		else if (regs->len < reglen)
2919 			error = ENOBUFS;
2920 
2921 		if (!error) {
2922 			cxgb_get_regs(sc, regs, buf);
2923 			error = copyout(buf, regs->data, reglen);
2924 		}
2925 		free(buf, M_DEVBUF);
2926 
2927 		break;
2928 	}
2929 	case CHELSIO_SET_HW_SCHED: {
2930 		struct ch_hw_sched *t = (struct ch_hw_sched *)data;
2931 		unsigned int ticks_per_usec = core_ticks_per_usec(sc);
2932 
2933 		if ((sc->flags & FULL_INIT_DONE) == 0)
2934 			return (EAGAIN);       /* need TP to be initialized */
2935 		if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
2936 		    !in_range(t->channel, 0, 1) ||
2937 		    !in_range(t->kbps, 0, 10000000) ||
2938 		    !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
2939 		    !in_range(t->flow_ipg, 0,
2940 			      dack_ticks_to_usec(sc, 0x7ff)))
2941 			return (EINVAL);
2942 
2943 		if (t->kbps >= 0) {
2944 			error = t3_config_sched(sc, t->kbps, t->sched);
2945 			if (error < 0)
2946 				return (-error);
2947 		}
2948 		if (t->class_ipg >= 0)
2949 			t3_set_sched_ipg(sc, t->sched, t->class_ipg);
2950 		if (t->flow_ipg >= 0) {
2951 			t->flow_ipg *= 1000;     /* us -> ns */
2952 			t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
2953 		}
2954 		if (t->mode >= 0) {
2955 			int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
2956 
2957 			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2958 					 bit, t->mode ? bit : 0);
2959 		}
2960 		if (t->channel >= 0)
2961 			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2962 					 1 << t->sched, t->channel << t->sched);
2963 		break;
2964 	}
2965 	case CHELSIO_GET_EEPROM: {
2966 		int i;
2967 		struct ch_eeprom *e = (struct ch_eeprom *)data;
2968 		uint8_t *buf;
2969 
2970 		if (e->offset & 3 || e->offset >= EEPROMSIZE ||
2971 		    e->len > EEPROMSIZE || e->offset + e->len > EEPROMSIZE) {
2972 			return (EINVAL);
2973 		}
2974 
2975 		buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT);
2976 		if (buf == NULL) {
2977 			return (ENOMEM);
2978 		}
2979 		e->magic = EEPROM_MAGIC;
2980 		for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
2981 			error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);
2982 
2983 		if (!error)
2984 			error = copyout(buf + e->offset, e->data, e->len);
2985 
2986 		free(buf, M_DEVBUF);
2987 		break;
2988 	}
2989 	case CHELSIO_CLEAR_STATS: {
2990 		if (!(sc->flags & FULL_INIT_DONE))
2991 			return EAGAIN;
2992 
2993 		PORT_LOCK(pi);
2994 		t3_mac_update_stats(&pi->mac);
2995 		memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
2996 		PORT_UNLOCK(pi);
2997 		break;
2998 	}
2999 	case CHELSIO_GET_UP_LA: {
3000 		struct ch_up_la *la = (struct ch_up_la *)data;
3001 		uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT);
3002 		if (buf == NULL) {
3003 			return (ENOMEM);
3004 		}
3005 		if (la->bufsize < LA_BUFSIZE)
3006 			error = ENOBUFS;
3007 
3008 		if (!error)
3009 			error = -t3_get_up_la(sc, &la->stopped, &la->idx,
3010 					      &la->bufsize, buf);
3011 		if (!error)
3012 			error = copyout(buf, la->data, la->bufsize);
3013 
3014 		free(buf, M_DEVBUF);
3015 		break;
3016 	}
3017 	case CHELSIO_GET_UP_IOQS: {
3018 		struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data;
3019 		uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT);
3020 		uint32_t *v;
3021 
3022 		if (buf == NULL) {
3023 			return (ENOMEM);
3024 		}
3025 		if (ioqs->bufsize < IOQS_BUFSIZE)
3026 			error = ENOBUFS;
3027 
3028 		if (!error)
3029 			error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf);
3030 
3031 		if (!error) {
3032 			v = (uint32_t *)buf;
3033 
3034 			ioqs->ioq_rx_enable = *v++;
3035 			ioqs->ioq_tx_enable = *v++;
3036 			ioqs->ioq_rx_status = *v++;
3037 			ioqs->ioq_tx_status = *v++;
3038 
3039 			error = copyout(v, ioqs->data, ioqs->bufsize);
3040 		}
3041 
3042 		free(buf, M_DEVBUF);
3043 		break;
3044 	}
3045 	case CHELSIO_SET_FILTER: {
3046 		struct ch_filter *f = (struct ch_filter *)data;
3047 		struct filter_info *p;
3048 		unsigned int nfilters = sc->params.mc5.nfilters;
3049 
3050 		if (!is_offload(sc))
3051 			return (EOPNOTSUPP);	/* No TCAM */
3052 		if (!(sc->flags & FULL_INIT_DONE))
3053 			return (EAGAIN);	/* mc5 not setup yet */
3054 		if (nfilters == 0)
3055 			return (EBUSY);		/* TOE will use TCAM */
3056 
3057 		/* sanity checks */
3058 		if (f->filter_id >= nfilters ||
3059 		    (f->val.dip && f->mask.dip != 0xffffffff) ||
3060 		    (f->val.sport && f->mask.sport != 0xffff) ||
3061 		    (f->val.dport && f->mask.dport != 0xffff) ||
3062 		    (f->val.vlan && f->mask.vlan != 0xfff) ||
3063 		    (f->val.vlan_prio &&
3064 			f->mask.vlan_prio != FILTER_NO_VLAN_PRI) ||
3065 		    (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) ||
3066 		    f->qset >= SGE_QSETS ||
3067 		    sc->rrss_map[f->qset] >= RSS_TABLE_SIZE)
3068 			return (EINVAL);
3069 
3070 		/* Was allocated with M_WAITOK */
3071 		KASSERT(sc->filters, ("filter table NULL\n"));
3072 
3073 		p = &sc->filters[f->filter_id];
3074 		if (p->locked)
3075 			return (EPERM);
3076 
3077 		bzero(p, sizeof(*p));
3078 		p->sip = f->val.sip;
3079 		p->sip_mask = f->mask.sip;
3080 		p->dip = f->val.dip;
3081 		p->sport = f->val.sport;
3082 		p->dport = f->val.dport;
3083 		p->vlan = f->mask.vlan ? f->val.vlan : 0xfff;
3084 		p->vlan_prio = f->mask.vlan_prio ? (f->val.vlan_prio & 6) :
3085 		    FILTER_NO_VLAN_PRI;
3086 		p->mac_hit = f->mac_hit;
3087 		p->mac_vld = f->mac_addr_idx != 0xffff;
3088 		p->mac_idx = f->mac_addr_idx;
3089 		p->pkt_type = f->proto;
3090 		p->report_filter_id = f->want_filter_id;
3091 		p->pass = f->pass;
3092 		p->rss = f->rss;
3093 		p->qset = f->qset;
3094 
3095 		error = set_filter(sc, f->filter_id, p);
3096 		if (error == 0)
3097 			p->valid = 1;
3098 		break;
3099 	}
3100 	case CHELSIO_DEL_FILTER: {
3101 		struct ch_filter *f = (struct ch_filter *)data;
3102 		struct filter_info *p;
3103 		unsigned int nfilters = sc->params.mc5.nfilters;
3104 
3105 		if (!is_offload(sc))
3106 			return (EOPNOTSUPP);
3107 		if (!(sc->flags & FULL_INIT_DONE))
3108 			return (EAGAIN);
3109 		if (nfilters == 0 || sc->filters == NULL)
3110 			return (EINVAL);
3111 		if (f->filter_id >= nfilters)
3112 		       return (EINVAL);
3113 
3114 		p = &sc->filters[f->filter_id];
3115 		if (p->locked)
3116 			return (EPERM);
3117 		if (!p->valid)
3118 			return (EFAULT); /* Read "Bad address" as "Bad index" */
3119 
3120 		bzero(p, sizeof(*p));
3121 		p->sip = p->sip_mask = 0xffffffff;
3122 		p->vlan = 0xfff;
3123 		p->vlan_prio = FILTER_NO_VLAN_PRI;
3124 		p->pkt_type = 1;
3125 		error = set_filter(sc, f->filter_id, p);
3126 		break;
3127 	}
3128 	case CHELSIO_GET_FILTER: {
3129 		struct ch_filter *f = (struct ch_filter *)data;
3130 		struct filter_info *p;
3131 		unsigned int i, nfilters = sc->params.mc5.nfilters;
3132 
3133 		if (!is_offload(sc))
3134 			return (EOPNOTSUPP);
3135 		if (!(sc->flags & FULL_INIT_DONE))
3136 			return (EAGAIN);
3137 		if (nfilters == 0 || sc->filters == NULL)
3138 			return (EINVAL);
3139 
3140 		i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1;
3141 		for (; i < nfilters; i++) {
3142 			p = &sc->filters[i];
3143 			if (!p->valid)
3144 				continue;
3145 
3146 			bzero(f, sizeof(*f));
3147 
3148 			f->filter_id = i;
3149 			f->val.sip = p->sip;
3150 			f->mask.sip = p->sip_mask;
3151 			f->val.dip = p->dip;
3152 			f->mask.dip = p->dip ? 0xffffffff : 0;
3153 			f->val.sport = p->sport;
3154 			f->mask.sport = p->sport ? 0xffff : 0;
3155 			f->val.dport = p->dport;
3156 			f->mask.dport = p->dport ? 0xffff : 0;
3157 			f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan;
3158 			f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff;
3159 			f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
3160 			    0 : p->vlan_prio;
3161 			f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
3162 			    0 : FILTER_NO_VLAN_PRI;
3163 			f->mac_hit = p->mac_hit;
3164 			f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff;
3165 			f->proto = p->pkt_type;
3166 			f->want_filter_id = p->report_filter_id;
3167 			f->pass = p->pass;
3168 			f->rss = p->rss;
3169 			f->qset = p->qset;
3170 
3171 			break;
3172 		}
3173 
3174 		if (i == nfilters)
3175 			f->filter_id = 0xffffffff;
3176 		break;
3177 	}
3178 	default:
3179 		return (EOPNOTSUPP);
3180 		break;
3181 	}
3182 
3183 	return (error);
3184 }
3185 
3186 static __inline void
3187 reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
3188     unsigned int end)
3189 {
3190 	uint32_t *p = (uint32_t *)(buf + start);
3191 
3192 	for ( ; start <= end; start += sizeof(uint32_t))
3193 		*p++ = t3_read_reg(ap, start);
3194 }
3195 
3196 #define T3_REGMAP_SIZE (3 * 1024)
3197 static int
3198 cxgb_get_regs_len(void)
3199 {
3200 	return T3_REGMAP_SIZE;
3201 }
3202 
3203 static void
3204 cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf)
3205 {
3206 
3207 	/*
3208 	 * Version scheme:
3209 	 * bits 0..9: chip version
3210 	 * bits 10..15: chip revision
3211 	 * bit 31: set for PCIe cards
3212 	 */
3213 	regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);
3214 
3215 	/*
3216 	 * We skip the MAC statistics registers because they are clear-on-read.
3217 	 * Also reading multi-register stats would need to synchronize with the
3218 	 * periodic mac stats accumulation.  Hard to justify the complexity.
3219 	 */
3220 	memset(buf, 0, cxgb_get_regs_len());
3221 	reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
3222 	reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
3223 	reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
3224 	reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
3225 	reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
3226 	reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
3227 		       XGM_REG(A_XGM_SERDES_STAT3, 1));
3228 	reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
3229 		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
3230 }
3231 
3232 static int
3233 alloc_filters(struct adapter *sc)
3234 {
3235 	struct filter_info *p;
3236 	unsigned int nfilters = sc->params.mc5.nfilters;
3237 
3238 	if (nfilters == 0)
3239 		return (0);
3240 
3241 	p = malloc(sizeof(*p) * nfilters, M_DEVBUF, M_WAITOK | M_ZERO);
3242 	sc->filters = p;
3243 
3244 	p = &sc->filters[nfilters - 1];
3245 	p->vlan = 0xfff;
3246 	p->vlan_prio = FILTER_NO_VLAN_PRI;
3247 	p->pass = p->rss = p->valid = p->locked = 1;
3248 
3249 	return (0);
3250 }
3251 
3252 static int
3253 setup_hw_filters(struct adapter *sc)
3254 {
3255 	int i, rc;
3256 	unsigned int nfilters = sc->params.mc5.nfilters;
3257 
3258 	if (!sc->filters)
3259 		return (0);
3260 
3261 	t3_enable_filters(sc);
3262 
3263 	for (i = rc = 0; i < nfilters && !rc; i++) {
3264 		if (sc->filters[i].locked)
3265 			rc = set_filter(sc, i, &sc->filters[i]);
3266 	}
3267 
3268 	return (rc);
3269 }
3270 
3271 static int
3272 set_filter(struct adapter *sc, int id, const struct filter_info *f)
3273 {
3274 	int len;
3275 	struct mbuf *m;
3276 	struct ulp_txpkt *txpkt;
3277 	struct work_request_hdr *wr;
3278 	struct cpl_pass_open_req *oreq;
3279 	struct cpl_set_tcb_field *sreq;
3280 
3281 	len = sizeof(*wr) + sizeof(*oreq) + 2 * sizeof(*sreq);
3282 	KASSERT(len <= MHLEN, ("filter request too big for an mbuf"));
3283 
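	/*
	 * Filters occupy the TCAM entries just below the routing region at
	 * the top, so convert the relative filter id to an absolute index.
	 */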
3284 	id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes -
3285 	      sc->params.mc5.nfilters;
3286 
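	/*
	 * One management mbuf carries an atomic BYPASS work request: a
	 * CPL_PASS_OPEN_REQ that programs the TCAM entry, followed by two
	 * CPL_SET_TCB_FIELDs that set the filter's disposition.
	 */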
3287 	m = m_gethdr(M_WAITOK, MT_DATA);
3288 	m->m_len = m->m_pkthdr.len = len;
3289 	bzero(mtod(m, char *), len);
3290 
3291 	wr = mtod(m, struct work_request_hdr *);
3292 	wr->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC);
3293 
3294 	oreq = (struct cpl_pass_open_req *)(wr + 1);
3295 	txpkt = (struct ulp_txpkt *)oreq;
3296 	txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
3297 	txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8));
3298 	OPCODE_TID(oreq) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, id));
3299 	oreq->local_port = htons(f->dport);
3300 	oreq->peer_port = htons(f->sport);
3301 	oreq->local_ip = htonl(f->dip);
3302 	oreq->peer_ip = htonl(f->sip);
3303 	oreq->peer_netmask = htonl(f->sip_mask);
3304 	oreq->opt0h = 0;
3305 	oreq->opt0l = htonl(F_NO_OFFLOAD);
3306 	oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) |
3307 			 V_CONN_POLICY(CPL_CONN_POLICY_FILTER) |
3308 			 V_VLAN_PRI(f->vlan_prio >> 1) |
3309 			 V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) |
3310 			 V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) |
3311 			 V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4)));
3312 
3313 	sreq = (struct cpl_set_tcb_field *)(oreq + 1);
3314 	set_tcb_field_ulp(sreq, id, 1, 0x1800808000ULL,
3315 			  (f->report_filter_id << 15) | (1 << 23) |
3316 			  ((u64)f->pass << 35) | ((u64)!f->rss << 36));
3317 	set_tcb_field_ulp(sreq + 1, id, 0, 0xffffffff, (2 << 19) | 1);
3318 	t3_mgmt_tx(sc, m);
3319 
3320 	if (f->pass && !f->rss) {
3321 		len = sizeof(*sreq);
3322 		m = m_gethdr(M_WAITOK, MT_DATA);
3323 		m->m_len = m->m_pkthdr.len = len;
3324 		bzero(mtod(m, char *), len);
3325 		sreq = mtod(m, struct cpl_set_tcb_field *);
3326 		sreq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
3327 		mk_set_tcb_field(sreq, id, 25, 0x3f80000,
3328 				 (u64)sc->rrss_map[f->qset] << 19);
3329 		t3_mgmt_tx(sc, m);
3330 	}
3331 	return 0;
3332 }
3333 
3334 static inline void
3335 mk_set_tcb_field(struct cpl_set_tcb_field *req, unsigned int tid,
3336     unsigned int word, u64 mask, u64 val)
3337 {
3338 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
3339 	req->reply = V_NO_REPLY(1);
3340 	req->cpu_idx = 0;
3341 	req->word = htons(word);
3342 	req->mask = htobe64(mask);
3343 	req->val = htobe64(val);
3344 }
3345 
3346 static inline void
3347 set_tcb_field_ulp(struct cpl_set_tcb_field *req, unsigned int tid,
3348     unsigned int word, u64 mask, u64 val)
3349 {
3350 	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
3351 
3352 	txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
3353 	txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8));
3354 	mk_set_tcb_field(req, tid, word, mask, val);
3355 }
3356 
3357 void
3358 t3_iterate(void (*func)(struct adapter *, void *), void *arg)
3359 {
3360 	struct adapter *sc;
3361 
3362 	mtx_lock(&t3_list_lock);
3363 	SLIST_FOREACH(sc, &t3_list, link) {
3364 		/*
3365 		 * func should not make any assumptions about what state sc is
3366 		 * in - the only guarantee is that sc->lock is a valid lock.
3367 		 */
3368 		func(sc, arg);
3369 	}
3370 	mtx_unlock(&t3_list_lock);
3371 }
3372 
3373 #ifdef TCP_OFFLOAD
3374 static int
3375 toe_capability(struct port_info *pi, int enable)
3376 {
3377 	int rc;
3378 	struct adapter *sc = pi->adapter;
3379 
3380 	ADAPTER_LOCK_ASSERT_OWNED(sc);
3381 
3382 	if (!is_offload(sc))
3383 		return (ENODEV);
3384 
3385 	if (enable) {
3386 		if (!(sc->flags & FULL_INIT_DONE)) {
3387 			log(LOG_WARNING,
3388 			    "You must enable a cxgb interface first\n");
3389 			return (EAGAIN);
3390 		}
3391 
3392 		if (isset(&sc->offload_map, pi->port_id))
3393 			return (0);
3394 
3395 		if (!(sc->flags & TOM_INIT_DONE)) {
3396 			rc = t3_activate_uld(sc, ULD_TOM);
3397 			if (rc == EAGAIN) {
3398 				log(LOG_WARNING,
3399 				    "You must kldload t3_tom.ko before trying "
3400 				    "to enable TOE on a cxgb interface.\n");
3401 			}
3402 			if (rc != 0)
3403 				return (rc);
3404 			KASSERT(sc->tom_softc != NULL,
3405 			    ("%s: TOM activated but softc NULL", __func__));
3406 			KASSERT(sc->flags & TOM_INIT_DONE,
3407 			    ("%s: TOM activated but flag not set", __func__));
3408 		}
3409 
3410 		setbit(&sc->offload_map, pi->port_id);
3411 
3412 		/*
3413 		 * XXX: Temporary code to allow iWARP to be enabled when TOE is
3414 		 * enabled on any port.  Need to figure out how to enable,
3415 		 * disable, load, and unload iWARP cleanly.
3416 		 */
3417 		if (!isset(&sc->offload_map, MAX_NPORTS) &&
3418 		    t3_activate_uld(sc, ULD_IWARP) == 0)
3419 			setbit(&sc->offload_map, MAX_NPORTS);
3420 	} else {
3421 		if (!isset(&sc->offload_map, pi->port_id))
3422 			return (0);
3423 
3424 		KASSERT(sc->flags & TOM_INIT_DONE,
3425 		    ("%s: TOM never initialized?", __func__));
3426 		clrbit(&sc->offload_map, pi->port_id);
3427 	}
3428 
3429 	return (0);
3430 }
3431 
3432 /*
3433  * Add an upper layer driver to the global list.
3434  */
3435 int
3436 t3_register_uld(struct uld_info *ui)
3437 {
3438 	int rc = 0;
3439 	struct uld_info *u;
3440 
3441 	mtx_lock(&t3_uld_list_lock);
3442 	SLIST_FOREACH(u, &t3_uld_list, link) {
3443 	    if (u->uld_id == ui->uld_id) {
3444 		    rc = EEXIST;
3445 		    goto done;
3446 	    }
3447 	}
3448 
3449 	SLIST_INSERT_HEAD(&t3_uld_list, ui, link);
3450 	ui->refcount = 0;
3451 done:
3452 	mtx_unlock(&t3_uld_list_lock);
3453 	return (rc);
3454 }
3455 
3456 int
3457 t3_unregister_uld(struct uld_info *ui)
3458 {
3459 	int rc = EINVAL;
3460 	struct uld_info *u;
3461 
3462 	mtx_lock(&t3_uld_list_lock);
3463 
3464 	SLIST_FOREACH(u, &t3_uld_list, link) {
3465 	    if (u == ui) {
3466 		    if (ui->refcount > 0) {
3467 			    rc = EBUSY;
3468 			    goto done;
3469 		    }
3470 
3471 		    SLIST_REMOVE(&t3_uld_list, ui, uld_info, link);
3472 		    rc = 0;
3473 		    goto done;
3474 	    }
3475 	}
3476 done:
3477 	mtx_unlock(&t3_uld_list_lock);
3478 	return (rc);
3479 }
3480 
3481 int
3482 t3_activate_uld(struct adapter *sc, int id)
3483 {
3484 	int rc = EAGAIN;
3485 	struct uld_info *ui;
3486 
3487 	mtx_lock(&t3_uld_list_lock);
3488 
3489 	SLIST_FOREACH(ui, &t3_uld_list, link) {
3490 		if (ui->uld_id == id) {
3491 			rc = ui->activate(sc);
3492 			if (rc == 0)
3493 				ui->refcount++;
3494 			goto done;
3495 		}
3496 	}
3497 done:
3498 	mtx_unlock(&t3_uld_list_lock);
3499 
3500 	return (rc);
3501 }
3502 
3503 int
3504 t3_deactivate_uld(struct adapter *sc, int id)
3505 {
3506 	int rc = EINVAL;
3507 	struct uld_info *ui;
3508 
3509 	mtx_lock(&t3_uld_list_lock);
3510 
3511 	SLIST_FOREACH(ui, &t3_uld_list, link) {
3512 		if (ui->uld_id == id) {
3513 			rc = ui->deactivate(sc);
3514 			if (rc == 0)
3515 				ui->refcount--;
3516 			goto done;
3517 		}
3518 	}
3519 done:
3520 	mtx_unlock(&t3_uld_list_lock);
3521 
3522 	return (rc);
3523 }
3524 
3525 static int
3526 cpl_not_handled(struct sge_qset *qs __unused, struct rsp_desc *r __unused,
3527     struct mbuf *m)
3528 {
3529 	m_freem(m);
3530 	return (EDOOFUS);
3531 }
3532 
3533 int
3534 t3_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
3535 {
3536 	uintptr_t *loc, new;
3537 
3538 	if (opcode >= NUM_CPL_HANDLERS)
3539 		return (EINVAL);
3540 
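	/*
	 * A NULL handler restores the default (drop the message); the release
	 * store publishes the new pointer to the rx path.
	 */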
3541 	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
3542 	loc = (uintptr_t *) &sc->cpl_handler[opcode];
3543 	atomic_store_rel_ptr(loc, new);
3544 
3545 	return (0);
3546 }
3547 #endif
3548 
3549 static int
3550 cxgbc_mod_event(module_t mod, int cmd, void *arg)
3551 {
3552 	int rc = 0;
3553 
3554 	switch (cmd) {
3555 	case MOD_LOAD:
3556 		mtx_init(&t3_list_lock, "T3 adapters", 0, MTX_DEF);
3557 		SLIST_INIT(&t3_list);
3558 #ifdef TCP_OFFLOAD
3559 		mtx_init(&t3_uld_list_lock, "T3 ULDs", 0, MTX_DEF);
3560 		SLIST_INIT(&t3_uld_list);
3561 #endif
3562 		break;
3563 
3564 	case MOD_UNLOAD:
3565 #ifdef TCP_OFFLOAD
3566 		mtx_lock(&t3_uld_list_lock);
3567 		if (!SLIST_EMPTY(&t3_uld_list)) {
3568 			rc = EBUSY;
3569 			mtx_unlock(&t3_uld_list_lock);
3570 			break;
3571 		}
3572 		mtx_unlock(&t3_uld_list_lock);
3573 		mtx_destroy(&t3_uld_list_lock);
3574 #endif
3575 		mtx_lock(&t3_list_lock);
3576 		if (!SLIST_EMPTY(&t3_list)) {
3577 			rc = EBUSY;
3578 			mtx_unlock(&t3_list_lock);
3579 			break;
3580 		}
3581 		mtx_unlock(&t3_list_lock);
3582 		mtx_destroy(&t3_list_lock);
3583 		break;
3584 	}
3585 
3586 	return (rc);
3587 }
3588 
3589 #ifdef DEBUGNET
3590 static void
3591 cxgb_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
3592 {
3593 	struct port_info *pi;
3594 	adapter_t *adap;
3595 
3596 	pi = if_getsoftc(ifp);
3597 	adap = pi->adapter;
3598 	ADAPTER_LOCK(adap);
3599 	*nrxr = adap->nqsets;
3600 	*ncl = adap->sge.qs[0].fl[1].size;
3601 	*clsize = adap->sge.qs[0].fl[1].buf_size;
3602 	ADAPTER_UNLOCK(adap);
3603 }
3604 
3605 static void
3606 cxgb_debugnet_event(if_t ifp, enum debugnet_ev event)
3607 {
3608 	struct port_info *pi;
3609 	struct sge_qset *qs;
3610 	int i;
3611 
3612 	pi = if_getsoftc(ifp);
3613 	if (event == DEBUGNET_START)
3614 		for (i = 0; i < pi->adapter->nqsets; i++) {
3615 			qs = &pi->adapter->sge.qs[i];
3616 
3617 			/* Need to reinit after debugnet_mbuf_start(). */
3618 			qs->fl[0].zone = zone_pack;
3619 			qs->fl[1].zone = zone_clust;
3620 			qs->lro.enabled = 0;
3621 		}
3622 }
3623 
3624 static int
3625 cxgb_debugnet_transmit(if_t ifp, struct mbuf *m)
3626 {
3627 	struct port_info *pi;
3628 	struct sge_qset *qs;
3629 
3630 	pi = if_getsoftc(ifp);
3631 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
3632 	    IFF_DRV_RUNNING)
3633 		return (ENOENT);
3634 
3635 	qs = &pi->adapter->sge.qs[pi->first_qset];
3636 	return (cxgb_debugnet_encap(qs, &m));
3637 }
3638 
3639 static int
3640 cxgb_debugnet_poll(if_t ifp, int count)
3641 {
3642 	struct port_info *pi;
3643 	adapter_t *adap;
3644 	int i;
3645 
3646 	pi = if_getsoftc(ifp);
3647 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
3648 		return (ENOENT);
3649 
3650 	adap = pi->adapter;
3651 	for (i = 0; i < adap->nqsets; i++)
3652 		(void)cxgb_debugnet_poll_rx(adap, &adap->sge.qs[i]);
3653 	(void)cxgb_debugnet_poll_tx(&adap->sge.qs[pi->first_qset]);
3654 	return (0);
3655 }
3656 #endif /* DEBUGNET */
3657