xref: /freebsd/sys/dev/cxgb/cxgb_main.c (revision aa386085)
1 /**************************************************************************
2 SPDX-License-Identifier: BSD-2-Clause
3 
4 Copyright (c) 2007-2009, Chelsio Inc.
5 All rights reserved.
6 
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are met:
9 
10  1. Redistributions of source code must retain the above copyright notice,
11     this list of conditions and the following disclaimer.
12 
13  2. Neither the name of the Chelsio Corporation nor the names of its
14     contributors may be used to endorse or promote products derived from
15     this software without specific prior written permission.
16 
17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 POSSIBILITY OF SUCH DAMAGE.
28 
29 ***************************************************************************/
30 
31 #include <sys/cdefs.h>
32 #include "opt_inet.h"
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/bus.h>
38 #include <sys/module.h>
39 #include <sys/pciio.h>
40 #include <sys/conf.h>
41 #include <machine/bus.h>
42 #include <machine/resource.h>
43 #include <sys/ktr.h>
44 #include <sys/rman.h>
45 #include <sys/ioccom.h>
46 #include <sys/mbuf.h>
47 #include <sys/linker.h>
48 #include <sys/firmware.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/smp.h>
52 #include <sys/sysctl.h>
53 #include <sys/syslog.h>
54 #include <sys/queue.h>
55 #include <sys/taskqueue.h>
56 #include <sys/proc.h>
57 
58 #include <net/bpf.h>
59 #include <net/debugnet.h>
60 #include <net/ethernet.h>
61 #include <net/if.h>
62 #include <net/if_var.h>
63 #include <net/if_arp.h>
64 #include <net/if_dl.h>
65 #include <net/if_media.h>
66 #include <net/if_types.h>
67 #include <net/if_vlan_var.h>
68 
69 #include <netinet/in_systm.h>
70 #include <netinet/in.h>
71 #include <netinet/if_ether.h>
72 #include <netinet/ip.h>
73 #include <netinet/ip.h>
74 #include <netinet/tcp.h>
75 #include <netinet/udp.h>
76 
77 #include <dev/pci/pcireg.h>
78 #include <dev/pci/pcivar.h>
79 #include <dev/pci/pci_private.h>
80 
81 #include <cxgb_include.h>
82 
83 #ifdef PRIV_SUPPORTED
84 #include <sys/priv.h>
85 #endif
86 
87 static int cxgb_setup_interrupts(adapter_t *);
88 static void cxgb_teardown_interrupts(adapter_t *);
89 static void cxgb_init(void *);
90 static int cxgb_init_locked(struct port_info *);
91 static int cxgb_uninit_locked(struct port_info *);
92 static int cxgb_uninit_synchronized(struct port_info *);
93 static int cxgb_ioctl(if_t, unsigned long, caddr_t);
94 static int cxgb_media_change(if_t);
95 static int cxgb_ifm_type(int);
96 static void cxgb_build_medialist(struct port_info *);
97 static void cxgb_media_status(if_t, struct ifmediareq *);
98 static uint64_t cxgb_get_counter(if_t, ift_counter);
99 static int setup_sge_qsets(adapter_t *);
100 static void cxgb_async_intr(void *);
101 static void cxgb_tick_handler(void *, int);
102 static void cxgb_tick(void *);
103 static void link_check_callout(void *);
104 static void check_link_status(void *, int);
105 static void setup_rss(adapter_t *sc);
106 static int alloc_filters(struct adapter *);
107 static int setup_hw_filters(struct adapter *);
108 static int set_filter(struct adapter *, int, const struct filter_info *);
109 static inline void mk_set_tcb_field(struct cpl_set_tcb_field *, unsigned int,
110     unsigned int, u64, u64);
111 static inline void set_tcb_field_ulp(struct cpl_set_tcb_field *, unsigned int,
112     unsigned int, u64, u64);
113 #ifdef TCP_OFFLOAD
114 static int cpl_not_handled(struct sge_qset *, struct rsp_desc *, struct mbuf *);
115 #endif
116 
117 /* Attachment glue for the PCI controller end of the device.  Each port of
118  * the device is attached separately, as defined later.
119  */
120 static int cxgb_controller_probe(device_t);
121 static int cxgb_controller_attach(device_t);
122 static int cxgb_controller_detach(device_t);
123 static void cxgb_free(struct adapter *);
124 static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
125     unsigned int end);
126 static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf);
127 static int cxgb_get_regs_len(void);
128 static void touch_bars(device_t dev);
129 static void cxgb_update_mac_settings(struct port_info *p);
130 #ifdef TCP_OFFLOAD
131 static int toe_capability(struct port_info *, int);
132 #endif
133 
134 /* Table for probing the cards.  The desc field isn't actually used */
135 struct cxgb_ident {
136 	uint16_t	vendor;
137 	uint16_t	device;
138 	int		index;
139 	char		*desc;
140 } cxgb_identifiers[] = {
141 	{PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
142 	{PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
143 	{PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
144 	{PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
145 	{PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
146 	{PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
147 	{PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
148 	{PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
149 	{PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
150 	{PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
151 	{PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
152 	{PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"},
153 	{PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
154 	{PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
155 	{0, 0, 0, NULL}
156 };
157 
158 static device_method_t cxgb_controller_methods[] = {
159 	DEVMETHOD(device_probe,		cxgb_controller_probe),
160 	DEVMETHOD(device_attach,	cxgb_controller_attach),
161 	DEVMETHOD(device_detach,	cxgb_controller_detach),
162 
163 	DEVMETHOD_END
164 };
165 
166 static driver_t cxgb_controller_driver = {
167 	"cxgbc",
168 	cxgb_controller_methods,
169 	sizeof(struct adapter)
170 };
171 
172 static int cxgbc_mod_event(module_t, int, void *);
173 
174 DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgbc_mod_event, NULL);
175 MODULE_PNP_INFO("U16:vendor;U16:device", pci, cxgbc, cxgb_identifiers,
176     nitems(cxgb_identifiers) - 1);
177 MODULE_VERSION(cxgbc, 1);
178 MODULE_DEPEND(cxgbc, firmware, 1, 1, 1);
179 
180 /*
181  * Attachment glue for the ports.  Attachment is done directly to the
182  * controller device.
183  */
184 static int cxgb_port_probe(device_t);
185 static int cxgb_port_attach(device_t);
186 static int cxgb_port_detach(device_t);
187 
188 static device_method_t cxgb_port_methods[] = {
189 	DEVMETHOD(device_probe,		cxgb_port_probe),
190 	DEVMETHOD(device_attach,	cxgb_port_attach),
191 	DEVMETHOD(device_detach,	cxgb_port_detach),
192 	{ 0, 0 }
193 };
194 
195 static driver_t cxgb_port_driver = {
196 	"cxgb",
197 	cxgb_port_methods,
198 	0
199 };
200 
201 static d_ioctl_t cxgb_extension_ioctl;
202 static d_open_t cxgb_extension_open;
203 static d_close_t cxgb_extension_close;
204 
205 static struct cdevsw cxgb_cdevsw = {
206        .d_version =    D_VERSION,
207        .d_flags =      0,
208        .d_open =       cxgb_extension_open,
209        .d_close =      cxgb_extension_close,
210        .d_ioctl =      cxgb_extension_ioctl,
211        .d_name =       "cxgb",
212 };
213 
214 DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, 0, 0);
215 MODULE_VERSION(cxgb, 1);
216 
217 DEBUGNET_DEFINE(cxgb);
218 
219 static struct mtx t3_list_lock;
220 static SLIST_HEAD(, adapter) t3_list;
221 #ifdef TCP_OFFLOAD
222 static struct mtx t3_uld_list_lock;
223 static SLIST_HEAD(, uld_info) t3_uld_list;
224 #endif
225 
226 /*
227  * The driver uses the best interrupt scheme available on a platform in the
228  * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
229  * of these schemes the driver may consider as follows:
230  *
231  * msi = 2: choose from among all three options
232  * msi = 1: only consider MSI and pin interrupts
233  * msi = 0: force pin interrupts
234  */
235 static int msi_allowed = 2;
236 
237 SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
238     "CXGB driver parameters");
239 SYSCTL_INT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
240     "MSI-X, MSI, INTx selector");
241 
242 /*
243  * The driver uses an auto-queue algorithm by default.
244  * To disable it and force a single queue-set per port, use multiq = 0
245  */
246 static int multiq = 1;
247 SYSCTL_INT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
248     "use min(ncpus/ports, 8) queue-sets per port");
249 
250 /*
251  * By default the driver will not update the firmware unless
252  * it was compiled against a newer version
253  *
254  */
255 static int force_fw_update = 0;
256 SYSCTL_INT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
257     "update firmware even if up to date");
258 
259 int cxgb_use_16k_clusters = -1;
260 SYSCTL_INT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
261     &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue ");
262 
263 static int nfilters = -1;
264 SYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN,
265     &nfilters, 0, "max number of entries in the filter table");
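
/*
 * Illustrative usage sketch, not part of the driver itself: the CTLFLAG_RDTUN
 * knobs above double as loader tunables, so a typical way to set them is from
 * /boot/loader.conf before the module is loaded.  The values shown here are
 * hypothetical examples, not defaults:
 *
 *   hw.cxgb.msi_allowed="1"	# consider only MSI and INTx interrupts
 *   hw.cxgb.multiq="0"		# force a single queue set per port
 *   hw.cxgb.force_fw_update="1"	# update firmware even if up to date
 */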
266 
267 enum {
268 	MAX_TXQ_ENTRIES      = 16384,
269 	MAX_CTRL_TXQ_ENTRIES = 1024,
270 	MAX_RSPQ_ENTRIES     = 16384,
271 	MAX_RX_BUFFERS       = 16384,
272 	MAX_RX_JUMBO_BUFFERS = 16384,
273 	MIN_TXQ_ENTRIES      = 4,
274 	MIN_CTRL_TXQ_ENTRIES = 4,
275 	MIN_RSPQ_ENTRIES     = 32,
276 	MIN_FL_ENTRIES       = 32,
277 	MIN_FL_JUMBO_ENTRIES = 32
278 };
279 
280 struct filter_info {
281 	u32 sip;
282 	u32 sip_mask;
283 	u32 dip;
284 	u16 sport;
285 	u16 dport;
286 	u32 vlan:12;
287 	u32 vlan_prio:3;
288 	u32 mac_hit:1;
289 	u32 mac_idx:4;
290 	u32 mac_vld:1;
291 	u32 pkt_type:2;
292 	u32 report_filter_id:1;
293 	u32 pass:1;
294 	u32 rss:1;
295 	u32 qset:3;
296 	u32 locked:1;
297 	u32 valid:1;
298 };
299 
300 enum { FILTER_NO_VLAN_PRI = 7 };
301 
302 #define EEPROM_MAGIC 0x38E2F10C
303 
304 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
305 
306 
307 static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
308 
309 
310 static __inline char
311 t3rev2char(struct adapter *adapter)
312 {
313 	char rev = 'z';
314 
315 	switch(adapter->params.rev) {
316 	case T3_REV_A:
317 		rev = 'a';
318 		break;
319 	case T3_REV_B:
320 	case T3_REV_B2:
321 		rev = 'b';
322 		break;
323 	case T3_REV_C:
324 		rev = 'c';
325 		break;
326 	}
327 	return rev;
328 }
329 
330 static struct cxgb_ident *
331 cxgb_get_ident(device_t dev)
332 {
333 	struct cxgb_ident *id;
334 
335 	for (id = cxgb_identifiers; id->desc != NULL; id++) {
336 		if ((id->vendor == pci_get_vendor(dev)) &&
337 		    (id->device == pci_get_device(dev))) {
338 			return (id);
339 		}
340 	}
341 	return (NULL);
342 }
343 
344 static const struct adapter_info *
345 cxgb_get_adapter_info(device_t dev)
346 {
347 	struct cxgb_ident *id;
348 	const struct adapter_info *ai;
349 
350 	id = cxgb_get_ident(dev);
351 	if (id == NULL)
352 		return (NULL);
353 
354 	ai = t3_get_adapter_info(id->index);
355 
356 	return (ai);
357 }
358 
359 static int
360 cxgb_controller_probe(device_t dev)
361 {
362 	const struct adapter_info *ai;
363 	const char *ports;
364 	int nports;
365 
366 	ai = cxgb_get_adapter_info(dev);
367 	if (ai == NULL)
368 		return (ENXIO);
369 
370 	nports = ai->nports0 + ai->nports1;
371 	if (nports == 1)
372 		ports = "port";
373 	else
374 		ports = "ports";
375 
376 	device_set_descf(dev, "%s, %d %s", ai->desc, nports, ports);
377 	return (BUS_PROBE_DEFAULT);
378 }
379 
380 #define FW_FNAME "cxgb_t3fw"
381 #define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom"
382 #define TPSRAM_NAME "cxgb_t3%c_protocol_sram"
383 
384 static int
385 upgrade_fw(adapter_t *sc)
386 {
387 	const struct firmware *fw;
388 	int status;
389 	u32 vers;
390 
391 	if ((fw = firmware_get(FW_FNAME)) == NULL)  {
392 		device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
393 		return (ENOENT);
394 	} else
395 		device_printf(sc->dev, "installing firmware on card\n");
396 	status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
397 
398 	if (status != 0) {
399 		device_printf(sc->dev, "failed to install firmware: %d\n",
400 		    status);
401 	} else {
402 		t3_get_fw_version(sc, &vers);
403 		snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
404 		    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
405 		    G_FW_VERSION_MICRO(vers));
406 	}
407 
408 	firmware_put(fw, FIRMWARE_UNLOAD);
409 
410 	return (status);
411 }
412 
413 /*
414  * The cxgb_controller_attach function is responsible for the initial
415  * bringup of the device.  Its responsibilities include:
416  *
417  *  1. Determine if the device supports MSI or MSI-X.
418  *  2. Allocate bus resources so that we can access the Base Address Register
419  *  3. Create and initialize mutexes for the controller and its control
420  *     logic such as SGE and MDIO.
421  *  4. Call hardware specific setup routine for the adapter as a whole.
422  *  5. Allocate the BAR for doing MSI-X.
423  *  6. Set up the line interrupt iff MSI-X is not supported.
424  *  7. Create the driver's taskq.
425  *  8. Start one task queue service thread.
426  *  9. Check if the firmware and SRAM are up-to-date.  They will be
427  *     auto-updated later (before FULL_INIT_DONE), if required.
428  * 10. Create a child device for each MAC (port)
429  * 11. Initialize T3 private state.
430  * 12. Trigger the LED
431  * 13. Set up offload iff supported.
432  * 14. Reset/restart the tick callout.
433  * 15. Attach sysctls
434  *
435  * NOTE: Any modification or deviation from this list MUST be reflected in
436  * the above comment.  Failure to do so will result in problems on various
437  * error conditions including link flapping.
438  */
439 static int
440 cxgb_controller_attach(device_t dev)
441 {
442 	device_t child;
443 	const struct adapter_info *ai;
444 	struct adapter *sc;
445 	int i, error = 0;
446 	uint32_t vers;
447 	int port_qsets = 1;
448 	int msi_needed, reg;
449 
450 	sc = device_get_softc(dev);
451 	sc->dev = dev;
452 	sc->msi_count = 0;
453 	ai = cxgb_get_adapter_info(dev);
454 
455 	snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
456 	    device_get_unit(dev));
457 	ADAPTER_LOCK_INIT(sc, sc->lockbuf);
458 
459 	snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
460 	    device_get_unit(dev));
461 	snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
462 	    device_get_unit(dev));
463 	snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
464 	    device_get_unit(dev));
465 
466 	MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
467 	MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
468 	MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
469 
470 	mtx_lock(&t3_list_lock);
471 	SLIST_INSERT_HEAD(&t3_list, sc, link);
472 	mtx_unlock(&t3_list_lock);
473 
474 	/* find the PCIe link width and set max read request to 4KB */
475 	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
476 		uint16_t lnk;
477 
478 		lnk = pci_read_config(dev, reg + PCIER_LINK_STA, 2);
479 		sc->link_width = (lnk & PCIEM_LINK_STA_WIDTH) >> 4;
480 		if (sc->link_width < 8 &&
481 		    (ai->caps & SUPPORTED_10000baseT_Full)) {
482 			device_printf(sc->dev,
483 			    "PCIe x%d Link, expect reduced performance\n",
484 			    sc->link_width);
485 		}
486 
487 		pci_set_max_read_req(dev, 4096);
488 	}
489 
490 	touch_bars(dev);
491 	pci_enable_busmaster(dev);
492 	/*
493 	 * Allocate the registers and make them available to the driver.
494 	 * The registers that we care about for NIC mode are in BAR 0
495 	 */
496 	sc->regs_rid = PCIR_BAR(0);
497 	if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
498 	    &sc->regs_rid, RF_ACTIVE)) == NULL) {
499 		device_printf(dev, "Cannot allocate BAR region 0\n");
500 		error = ENXIO;
501 		goto out;
502 	}
503 
504 	sc->bt = rman_get_bustag(sc->regs_res);
505 	sc->bh = rman_get_bushandle(sc->regs_res);
506 	sc->mmio_len = rman_get_size(sc->regs_res);
507 
508 	for (i = 0; i < MAX_NPORTS; i++)
509 		sc->port[i].adapter = sc;
510 
511 	if (t3_prep_adapter(sc, ai, 1) < 0) {
512 		printf("prep adapter failed\n");
513 		error = ENODEV;
514 		goto out;
515 	}
516 
517 	sc->udbs_rid = PCIR_BAR(2);
518 	sc->udbs_res = NULL;
519 	if (is_offload(sc) &&
520 	    ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
521 		   &sc->udbs_rid, RF_ACTIVE)) == NULL)) {
522 		device_printf(dev, "Cannot allocate BAR region 1\n");
523 		error = ENXIO;
524 		goto out;
525 	}
526 
527         /* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
528 	 * enough messages for the queue sets.  If that fails, try falling
529 	 * back to MSI.  If that fails, then try falling back to the legacy
530 	 * interrupt pin model.
531 	 */
532 	sc->msix_regs_rid = 0x20;
533 	if ((msi_allowed >= 2) &&
534 	    (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
535 	    &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
536 
537 		if (multiq)
538 			port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus);
539 		msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1;
540 
541 		if (pci_msix_count(dev) == 0 ||
542 		    (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 ||
543 		    sc->msi_count != msi_needed) {
544 			device_printf(dev, "alloc msix failed - "
545 				      "msi_count=%d, msi_needed=%d, err=%d; "
546 				      "will try MSI\n", sc->msi_count,
547 				      msi_needed, error);
548 			sc->msi_count = 0;
549 			port_qsets = 1;
550 			pci_release_msi(dev);
551 			bus_release_resource(dev, SYS_RES_MEMORY,
552 			    sc->msix_regs_rid, sc->msix_regs_res);
553 			sc->msix_regs_res = NULL;
554 		} else {
555 			sc->flags |= USING_MSIX;
556 			sc->cxgb_intr = cxgb_async_intr;
557 			device_printf(dev,
558 				      "using MSI-X interrupts (%u vectors)\n",
559 				      sc->msi_count);
560 		}
561 	}
562 
563 	if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
564 		sc->msi_count = 1;
565 		if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) {
566 			device_printf(dev, "alloc msi failed - "
567 				      "err=%d; will try INTx\n", error);
568 			sc->msi_count = 0;
569 			port_qsets = 1;
570 			pci_release_msi(dev);
571 		} else {
572 			sc->flags |= USING_MSI;
573 			sc->cxgb_intr = t3_intr_msi;
574 			device_printf(dev, "using MSI interrupts\n");
575 		}
576 	}
577 	if (sc->msi_count == 0) {
578 		device_printf(dev, "using line interrupts\n");
579 		sc->cxgb_intr = t3b_intr;
580 	}
581 
582 	/* Create a private taskqueue thread for handling driver events */
583 	sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
584 	    taskqueue_thread_enqueue, &sc->tq);
585 	if (sc->tq == NULL) {
586 		device_printf(dev, "failed to allocate controller task queue\n");
587 		goto out;
588 	}
589 
590 	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
591 	    device_get_nameunit(dev));
592 	TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
593 
594 
595 	/* Create a periodic callout for checking adapter status */
596 	callout_init(&sc->cxgb_tick_ch, 1);
597 
598 	if (t3_check_fw_version(sc) < 0 || force_fw_update) {
599 		/*
600 		 * Warn user that a firmware update will be attempted in init.
601 		 */
602 		device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
603 		    FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
604 		sc->flags &= ~FW_UPTODATE;
605 	} else {
606 		sc->flags |= FW_UPTODATE;
607 	}
608 
609 	if (t3_check_tpsram_version(sc) < 0) {
610 		/*
611 		 * Warn user that a firmware update will be attempted in init.
612 		 */
613 		device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
614 		    t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
615 		sc->flags &= ~TPS_UPTODATE;
616 	} else {
617 		sc->flags |= TPS_UPTODATE;
618 	}
619 
620 	/*
621 	 * Create a child device for each MAC.  The ethernet attachment
622 	 * will be done in these children.
623 	 */
624 	for (i = 0; i < (sc)->params.nports; i++) {
625 		struct port_info *pi;
626 
627 		if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
628 			device_printf(dev, "failed to add child port\n");
629 			error = EINVAL;
630 			goto out;
631 		}
632 		pi = &sc->port[i];
633 		pi->adapter = sc;
634 		pi->nqsets = port_qsets;
635 		pi->first_qset = i*port_qsets;
636 		pi->port_id = i;
637 		pi->tx_chan = i >= ai->nports0;
638 		pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
639 		sc->rxpkt_map[pi->txpkt_intf] = i;
640 		sc->port[i].tx_chan = i >= ai->nports0;
641 		sc->portdev[i] = child;
642 		device_set_softc(child, pi);
643 	}
644 	if ((error = bus_generic_attach(dev)) != 0)
645 		goto out;
646 
647 	/* initialize sge private state */
648 	t3_sge_init_adapter(sc);
649 
650 	t3_led_ready(sc);
651 
652 	error = t3_get_fw_version(sc, &vers);
653 	if (error)
654 		goto out;
655 
656 	snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
657 	    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
658 	    G_FW_VERSION_MICRO(vers));
659 
660 	device_set_descf(dev, "%s %sNIC\t E/C: %s S/N: %s",
661 	    ai->desc, is_offload(sc) ? "R" : "",
662 	    sc->params.vpd.ec, sc->params.vpd.sn);
663 
664 	snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x",
665 		 sc->params.vpd.port_type[0], sc->params.vpd.port_type[1],
666 		 sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]);
667 
668 	device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
669 	callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
670 	t3_add_attach_sysctls(sc);
671 
672 #ifdef TCP_OFFLOAD
673 	for (i = 0; i < NUM_CPL_HANDLERS; i++)
674 		sc->cpl_handler[i] = cpl_not_handled;
675 #endif
676 
677 	t3_intr_clear(sc);
678 	error = cxgb_setup_interrupts(sc);
679 out:
680 	if (error)
681 		cxgb_free(sc);
682 
683 	return (error);
684 }
685 
686 /*
687  * The cxgb_controller_detach routine is called when the device is
688  * unloaded from the system.
689  */
690 
691 static int
692 cxgb_controller_detach(device_t dev)
693 {
694 	struct adapter *sc;
695 
696 	sc = device_get_softc(dev);
697 
698 	cxgb_free(sc);
699 
700 	return (0);
701 }
702 
703 /*
704  * cxgb_free() is called by the cxgb_controller_detach() routine
705  * to tear down the structures that were built up in
706  * cxgb_controller_attach(), and should be the final piece of work
707  * done when fully unloading the driver.
708  *
709  *
710  *  1. Shutting down the threads started by the cxgb_controller_attach()
711  *     routine.
712  *  2. Stopping the lower level device and all callouts (cxgb_down_locked()).
713  *  3. Detaching all of the port devices created during the
714  *     cxgb_controller_attach() routine.
715  *  4. Removing the device children created via cxgb_controller_attach().
716  *  5. Releasing PCI resources associated with the device.
717  *  6. Turning off the offload support, iff it was turned on.
718  *  7. Destroying the mutexes created in cxgb_controller_attach().
719  *
720  */
721 static void
722 cxgb_free(struct adapter *sc)
723 {
724 	int i, nqsets = 0;
725 
726 	ADAPTER_LOCK(sc);
727 	sc->flags |= CXGB_SHUTDOWN;
728 	ADAPTER_UNLOCK(sc);
729 
730 	/*
731 	 * Make sure all child devices are gone.
732 	 */
733 	bus_generic_detach(sc->dev);
734 	for (i = 0; i < (sc)->params.nports; i++) {
735 		if (sc->portdev[i] &&
736 		    device_delete_child(sc->dev, sc->portdev[i]) != 0)
737 			device_printf(sc->dev, "failed to delete child port\n");
738 		nqsets += sc->port[i].nqsets;
739 	}
740 
741 	/*
742 	 * At this point, it is as if cxgb_port_detach has run on all ports, and
743 	 * cxgb_down has run on the adapter.  All interrupts have been silenced,
744 	 * all open devices have been closed.
745 	 */
746 	KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)",
747 					   __func__, sc->open_device_map));
748 	for (i = 0; i < sc->params.nports; i++) {
749 		KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!",
750 						  __func__, i));
751 	}
752 
753 	/*
754 	 * Finish off the adapter's callouts.
755 	 */
756 	callout_drain(&sc->cxgb_tick_ch);
757 	callout_drain(&sc->sge_timer_ch);
758 
759 	/*
760 	 * Release resources grabbed under FULL_INIT_DONE by cxgb_up.  The
761 	 * sysctls are cleaned up by the kernel linker.
762 	 */
763 	if (sc->flags & FULL_INIT_DONE) {
764  		t3_free_sge_resources(sc, nqsets);
765  		sc->flags &= ~FULL_INIT_DONE;
766  	}
767 
768 	/*
769 	 * Release all interrupt resources.
770 	 */
771 	cxgb_teardown_interrupts(sc);
772 	if (sc->flags & (USING_MSI | USING_MSIX)) {
773 		device_printf(sc->dev, "releasing msi message(s)\n");
774 		pci_release_msi(sc->dev);
775 	} else {
776 		device_printf(sc->dev, "no msi message to release\n");
777 	}
778 
779 	if (sc->msix_regs_res != NULL) {
780 		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
781 		    sc->msix_regs_res);
782 	}
783 
784 	/*
785 	 * Free the adapter's taskqueue.
786 	 */
787 	if (sc->tq != NULL) {
788 		taskqueue_free(sc->tq);
789 		sc->tq = NULL;
790 	}
791 
792 	free(sc->filters, M_DEVBUF);
793 	t3_sge_free(sc);
794 
795 	if (sc->udbs_res != NULL)
796 		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
797 		    sc->udbs_res);
798 
799 	if (sc->regs_res != NULL)
800 		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
801 		    sc->regs_res);
802 
803 	MTX_DESTROY(&sc->mdio_lock);
804 	MTX_DESTROY(&sc->sge.reg_lock);
805 	MTX_DESTROY(&sc->elmer_lock);
806 	mtx_lock(&t3_list_lock);
807 	SLIST_REMOVE(&t3_list, sc, adapter, link);
808 	mtx_unlock(&t3_list_lock);
809 	ADAPTER_LOCK_DEINIT(sc);
810 }
811 
812 /**
813  *	setup_sge_qsets - configure SGE Tx/Rx/response queues
814  *	@sc: the controller softc
815  *
816  *	Determines how many sets of SGE queues to use and initializes them.
817  *	We support multiple queue sets per port if we have MSI-X, otherwise
818  *	just one queue set per port.
819  */
820 static int
821 setup_sge_qsets(adapter_t *sc)
822 {
823 	int i, j, err, irq_idx = 0, qset_idx = 0;
824 	u_int ntxq = SGE_TXQ_PER_SET;
825 
826 	if ((err = t3_sge_alloc(sc)) != 0) {
827 		device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
828 		return (err);
829 	}
830 
831 	if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
832 		irq_idx = -1;
833 
834 	for (i = 0; i < (sc)->params.nports; i++) {
835 		struct port_info *pi = &sc->port[i];
836 
837 		for (j = 0; j < pi->nqsets; j++, qset_idx++) {
838 			err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
839 			    (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
840 			    &sc->params.sge.qset[qset_idx], ntxq, pi);
841 			if (err) {
842 				t3_free_sge_resources(sc, qset_idx);
843 				device_printf(sc->dev,
844 				    "t3_sge_alloc_qset failed with %d\n", err);
845 				return (err);
846 			}
847 		}
848 	}
849 
850 	sc->nqsets = qset_idx;
851 
852 	return (0);
853 }
854 
855 static void
856 cxgb_teardown_interrupts(adapter_t *sc)
857 {
858 	int i;
859 
860 	for (i = 0; i < SGE_QSETS; i++) {
861 		if (sc->msix_intr_tag[i] == NULL) {
862 
863 			/* Should have been setup fully or not at all */
864 			KASSERT(sc->msix_irq_res[i] == NULL &&
865 				sc->msix_irq_rid[i] == 0,
866 				("%s: half-done interrupt (%d).", __func__, i));
867 
868 			continue;
869 		}
870 
871 		bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
872 				  sc->msix_intr_tag[i]);
873 		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i],
874 				     sc->msix_irq_res[i]);
875 
876 		sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL;
877 		sc->msix_irq_rid[i] = 0;
878 	}
879 
880 	if (sc->intr_tag) {
881 		KASSERT(sc->irq_res != NULL,
882 			("%s: half-done interrupt.", __func__));
883 
884 		bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
885 		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
886 				     sc->irq_res);
887 
888 		sc->irq_res = sc->intr_tag = NULL;
889 		sc->irq_rid = 0;
890 	}
891 }
892 
893 static int
894 cxgb_setup_interrupts(adapter_t *sc)
895 {
896 	struct resource *res;
897 	void *tag;
898 	int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX);
899 
900 	sc->irq_rid = intr_flag ? 1 : 0;
901 	sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid,
902 					     RF_SHAREABLE | RF_ACTIVE);
903 	if (sc->irq_res == NULL) {
904 		device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n",
905 			      intr_flag, sc->irq_rid);
906 		err = EINVAL;
907 		sc->irq_rid = 0;
908 	} else {
909 		err = bus_setup_intr(sc->dev, sc->irq_res,
910 		    INTR_MPSAFE | INTR_TYPE_NET, NULL,
911 		    sc->cxgb_intr, sc, &sc->intr_tag);
912 
913 		if (err) {
914 			device_printf(sc->dev,
915 				      "Cannot set up interrupt (%x, %u, %d)\n",
916 				      intr_flag, sc->irq_rid, err);
917 			bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
918 					     sc->irq_res);
919 			sc->irq_res = sc->intr_tag = NULL;
920 			sc->irq_rid = 0;
921 		}
922 	}
923 
924 	/* That's all for INTx or MSI */
925 	if (!(intr_flag & USING_MSIX) || err)
926 		return (err);
927 
928 	bus_describe_intr(sc->dev, sc->irq_res, sc->intr_tag, "err");
929 	for (i = 0; i < sc->msi_count - 1; i++) {
930 		rid = i + 2;
931 		res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
932 					     RF_SHAREABLE | RF_ACTIVE);
933 		if (res == NULL) {
934 			device_printf(sc->dev, "Cannot allocate interrupt "
935 				      "for message %d\n", rid);
936 			err = EINVAL;
937 			break;
938 		}
939 
940 		err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET,
941 				     NULL, t3_intr_msix, &sc->sge.qs[i], &tag);
942 		if (err) {
943 			device_printf(sc->dev, "Cannot set up interrupt "
944 				      "for message %d (%d)\n", rid, err);
945 			bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res);
946 			break;
947 		}
948 
949 		sc->msix_irq_rid[i] = rid;
950 		sc->msix_irq_res[i] = res;
951 		sc->msix_intr_tag[i] = tag;
952 		bus_describe_intr(sc->dev, res, tag, "qs%d", i);
953 	}
954 
955 	if (err)
956 		cxgb_teardown_interrupts(sc);
957 
958 	return (err);
959 }
960 
961 
962 static int
963 cxgb_port_probe(device_t dev)
964 {
965 	struct port_info *p;
966 	const char *desc;
967 
968 	p = device_get_softc(dev);
969 	desc = p->phy.desc;
970 	device_set_descf(dev, "Port %d %s", p->port_id, desc);
971 	return (0);
972 }
973 
974 
975 static int
976 cxgb_makedev(struct port_info *pi)
977 {
978 
979 	pi->port_cdev = make_dev(&cxgb_cdevsw, if_getdunit(pi->ifp),
980 	    UID_ROOT, GID_WHEEL, 0600, "%s", if_name(pi->ifp));
981 
982 	if (pi->port_cdev == NULL)
983 		return (ENOMEM);
984 
985 	pi->port_cdev->si_drv1 = (void *)pi;
986 
987 	return (0);
988 }
989 
990 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
991     IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
992     IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
993 #define CXGB_CAP_ENABLE CXGB_CAP
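
/*
 * A hedged usage note, not part of the original file: the IFCAP_* bits in
 * CXGB_CAP surface as standard ifconfig(8) capability options on each port,
 * so features such as LRO or TSO could be toggled at run time with commands
 * along the lines of:
 *
 *   ifconfig cxgb0 -lro	# disable large receive offload
 *   ifconfig cxgb0 tso		# enable TCP segmentation offload
 *
 * (cxgb0 is a hypothetical interface name used only for illustration.)
 */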
994 
995 static int
996 cxgb_port_attach(device_t dev)
997 {
998 	struct port_info *p;
999 	if_t ifp;
1000 	int err;
1001 	struct adapter *sc;
1002 
1003 	p = device_get_softc(dev);
1004 	sc = p->adapter;
1005 	snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
1006 	    device_get_unit(device_get_parent(dev)), p->port_id);
1007 	PORT_LOCK_INIT(p, p->lockbuf);
1008 
1009 	callout_init(&p->link_check_ch, 1);
1010 	TASK_INIT(&p->link_check_task, 0, check_link_status, p);
1011 
1012 	/* Allocate an ifnet object and set it up */
1013 	ifp = p->ifp = if_alloc(IFT_ETHER);
1014 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1015 	if_setinitfn(ifp, cxgb_init);
1016 	if_setsoftc(ifp, p);
1017 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
1018 	if_setioctlfn(ifp, cxgb_ioctl);
1019 	if_settransmitfn(ifp, cxgb_transmit);
1020 	if_setqflushfn(ifp, cxgb_qflush);
1021 	if_setgetcounterfn(ifp, cxgb_get_counter);
1022 
1023 	if_setcapabilities(ifp, CXGB_CAP);
1024 #ifdef TCP_OFFLOAD
1025 	if (is_offload(sc))
1026 		if_setcapabilitiesbit(ifp, IFCAP_TOE4, 0);
1027 #endif
1028 	if_setcapenable(ifp, CXGB_CAP_ENABLE);
1029 	if_sethwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
1030 	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1031 	if_sethwtsomax(ifp, IP_MAXPACKET);
1032 	if_sethwtsomaxsegcount(ifp, 36);
1033 	if_sethwtsomaxsegsize(ifp, 65536);
1034 
1035 	/*
1036 	 * Disable TSO on 4-port - it isn't supported by the firmware.
1037 	 */
1038 	if (sc->params.nports > 2) {
1039 		if_setcapabilitiesbit(ifp, 0, IFCAP_TSO | IFCAP_VLAN_HWTSO);
1040 		if_setcapenablebit(ifp, 0, IFCAP_TSO | IFCAP_VLAN_HWTSO);
1041 		if_sethwassistbits(ifp, 0, CSUM_TSO);
1042 	}
1043 
1044 	ether_ifattach(ifp, p->hw_addr);
1045 
1046 	/* Attach driver debugnet methods. */
1047 	DEBUGNET_SET(ifp, cxgb);
1048 
1049 #ifdef DEFAULT_JUMBO
1050 	if (sc->params.nports <= 2)
1051 		if_setmtu(ifp, ETHERMTU_JUMBO);
1052 #endif
1053 	if ((err = cxgb_makedev(p)) != 0) {
1054 		printf("makedev failed %d\n", err);
1055 		return (err);
1056 	}
1057 
1058 	/* Create a list of media supported by this port */
1059 	ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
1060 	    cxgb_media_status);
1061 	cxgb_build_medialist(p);
1062 
1063 	t3_sge_init_port(p);
1064 
1065 	return (err);
1066 }
1067 
1068 /*
1069  * cxgb_port_detach() is called via the device_detach method when
1070  * cxgb_free() calls bus_generic_detach().  It is responsible for
1071  * removing the device from the view of the kernel, i.e. from all
1072  * interfaces lists etc.  This routine is only called when the driver is
1073  * being unloaded, not when the link goes down.
1074  */
1075 static int
1076 cxgb_port_detach(device_t dev)
1077 {
1078 	struct port_info *p;
1079 	struct adapter *sc;
1080 	int i;
1081 
1082 	p = device_get_softc(dev);
1083 	sc = p->adapter;
1084 
1085 	/* Tell cxgb_ioctl and if_init that the port is going away */
1086 	ADAPTER_LOCK(sc);
1087 	SET_DOOMED(p);
1088 	wakeup(&sc->flags);
1089 	while (IS_BUSY(sc))
1090 		mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0);
1091 	SET_BUSY(sc);
1092 	ADAPTER_UNLOCK(sc);
1093 
1094 	if (p->port_cdev != NULL)
1095 		destroy_dev(p->port_cdev);
1096 
1097 	cxgb_uninit_synchronized(p);
1098 	ether_ifdetach(p->ifp);
1099 
1100 	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1101 		struct sge_qset *qs = &sc->sge.qs[i];
1102 		struct sge_txq *txq = &qs->txq[TXQ_ETH];
1103 
1104 		callout_drain(&txq->txq_watchdog);
1105 		callout_drain(&txq->txq_timer);
1106 	}
1107 
1108 	PORT_LOCK_DEINIT(p);
1109 	if_free(p->ifp);
1110 	p->ifp = NULL;
1111 
1112 	ADAPTER_LOCK(sc);
1113 	CLR_BUSY(sc);
1114 	wakeup_one(&sc->flags);
1115 	ADAPTER_UNLOCK(sc);
1116 	return (0);
1117 }
1118 
1119 void
1120 t3_fatal_err(struct adapter *sc)
1121 {
1122 	u_int fw_status[4];
1123 
1124 	if (sc->flags & FULL_INIT_DONE) {
1125 		t3_sge_stop(sc);
1126 		t3_write_reg(sc, A_XGM_TX_CTRL, 0);
1127 		t3_write_reg(sc, A_XGM_RX_CTRL, 0);
1128 		t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
1129 		t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
1130 		t3_intr_disable(sc);
1131 	}
1132 	device_printf(sc->dev,"encountered fatal error, operation suspended\n");
1133 	if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
1134 		device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
1135 		    fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
1136 }
1137 
1138 int
1139 t3_os_find_pci_capability(adapter_t *sc, int cap)
1140 {
1141 	device_t dev;
1142 	struct pci_devinfo *dinfo;
1143 	pcicfgregs *cfg;
1144 	uint32_t status;
1145 	uint8_t ptr;
1146 
1147 	dev = sc->dev;
1148 	dinfo = device_get_ivars(dev);
1149 	cfg = &dinfo->cfg;
1150 
1151 	status = pci_read_config(dev, PCIR_STATUS, 2);
1152 	if (!(status & PCIM_STATUS_CAPPRESENT))
1153 		return (0);
1154 
1155 	switch (cfg->hdrtype & PCIM_HDRTYPE) {
1156 	case 0:
1157 	case 1:
1158 		ptr = PCIR_CAP_PTR;
1159 		break;
1160 	case 2:
1161 		ptr = PCIR_CAP_PTR_2;
1162 		break;
1163 	default:
1164 		return (0);
1165 		break;
1166 	}
1167 	ptr = pci_read_config(dev, ptr, 1);
1168 
1169 	while (ptr != 0) {
1170 		if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
1171 			return (ptr);
1172 		ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1173 	}
1174 
1175 	return (0);
1176 }
1177 
1178 int
1179 t3_os_pci_save_state(struct adapter *sc)
1180 {
1181 	device_t dev;
1182 	struct pci_devinfo *dinfo;
1183 
1184 	dev = sc->dev;
1185 	dinfo = device_get_ivars(dev);
1186 
1187 	pci_cfg_save(dev, dinfo, 0);
1188 	return (0);
1189 }
1190 
1191 int
1192 t3_os_pci_restore_state(struct adapter *sc)
1193 {
1194 	device_t dev;
1195 	struct pci_devinfo *dinfo;
1196 
1197 	dev = sc->dev;
1198 	dinfo = device_get_ivars(dev);
1199 
1200 	pci_cfg_restore(dev, dinfo);
1201 	return (0);
1202 }
1203 
1204 /**
1205  *	t3_os_link_changed - handle link status changes
1206  *	@sc: the adapter associated with the link change
1207  *	@port_id: the port index whose link status has changed
1208  *	@link_status: the new status of the link
1209  *	@speed: the new speed setting
1210  *	@duplex: the new duplex setting
1211  *	@fc: the new flow-control setting
1212  *
1213  *	This is the OS-dependent handler for link status changes.  The OS
1214  *	neutral handler takes care of most of the processing for these events,
1215  *	then calls this handler for any OS-specific processing.
1216  */
1217 void
1218 t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
1219      int duplex, int fc, int mac_was_reset)
1220 {
1221 	struct port_info *pi = &adapter->port[port_id];
1222 	if_t ifp = pi->ifp;
1223 
1224 	/* no race with detach, so ifp should always be good */
1225 	KASSERT(ifp, ("%s: if detached.", __func__));
1226 
1227 	/* Reapply mac settings if they were lost due to a reset */
1228 	if (mac_was_reset) {
1229 		PORT_LOCK(pi);
1230 		cxgb_update_mac_settings(pi);
1231 		PORT_UNLOCK(pi);
1232 	}
1233 
1234 	if (link_status) {
1235 		if_setbaudrate(ifp, IF_Mbps(speed));
1236 		if_link_state_change(ifp, LINK_STATE_UP);
1237 	} else
1238 		if_link_state_change(ifp, LINK_STATE_DOWN);
1239 }
1240 
1241 /**
1242  *	t3_os_phymod_changed - handle PHY module changes
1243  *	@phy: the PHY reporting the module change
1244  *	@mod_type: new module type
1245  *
1246  *	This is the OS-dependent handler for PHY module changes.  It is
1247  *	invoked when a PHY module is removed or inserted for any OS-specific
1248  *	processing.
1249  */
1250 void t3_os_phymod_changed(struct adapter *adap, int port_id)
1251 {
1252 	static const char *mod_str[] = {
1253 		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown"
1254 	};
1255 	struct port_info *pi = &adap->port[port_id];
1256 	int mod = pi->phy.modtype;
1257 
1258 	if (mod != pi->media.ifm_cur->ifm_data)
1259 		cxgb_build_medialist(pi);
1260 
1261 	if (mod == phy_modtype_none)
1262 		if_printf(pi->ifp, "PHY module unplugged\n");
1263 	else {
1264 		KASSERT(mod < ARRAY_SIZE(mod_str),
1265 			("invalid PHY module type %d", mod));
1266 		if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]);
1267 	}
1268 }
1269 
1270 void
1271 t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
1272 {
1273 
1274 	/*
1275 	 * The ifnet might not be allocated before this gets called,
1276 	 * as this is called early on in attach by t3_prep_adapter, so
1277 	 * save the address off in the port structure.
1278 	 */
1279 	if (cxgb_debug)
1280 		printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
1281 	bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
1282 }
1283 
1284 /*
1285  * Programs the XGMAC based on the settings in the ifnet.  These settings
1286  * include MTU, MAC address, mcast addresses, etc.
1287  */
1288 static void
1289 cxgb_update_mac_settings(struct port_info *p)
1290 {
1291 	if_t ifp = p->ifp;
1292 	struct t3_rx_mode rm;
1293 	struct cmac *mac = &p->mac;
1294 	int mtu, hwtagging;
1295 
1296 	PORT_LOCK_ASSERT_OWNED(p);
1297 
1298 	bcopy(if_getlladdr(ifp), p->hw_addr, ETHER_ADDR_LEN);
1299 
1300 	mtu = if_getmtu(ifp);
1301 	if (if_getcapenable(ifp) & IFCAP_VLAN_MTU)
1302 		mtu += ETHER_VLAN_ENCAP_LEN;
1303 
1304 	hwtagging = (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0;
1305 
1306 	t3_mac_set_mtu(mac, mtu);
1307 	t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
1308 	t3_mac_set_address(mac, 0, p->hw_addr);
1309 	t3_init_rx_mode(&rm, p);
1310 	t3_mac_set_rx_mode(mac, &rm);
1311 }
1312 
1313 
1314 static int
1315 await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
1316 			      unsigned long n)
1317 {
1318 	int attempts = 5;
1319 
1320 	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
1321 		if (!--attempts)
1322 			return (ETIMEDOUT);
1323 		t3_os_sleep(10);
1324 	}
1325 	return 0;
1326 }
1327 
1328 static int
1329 init_tp_parity(struct adapter *adap)
1330 {
1331 	int i;
1332 	struct mbuf *m;
1333 	struct cpl_set_tcb_field *greq;
1334 	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
1335 
1336 	t3_tp_set_offload_mode(adap, 1);
1337 
1338 	for (i = 0; i < 16; i++) {
1339 		struct cpl_smt_write_req *req;
1340 
1341 		m = m_gethdr(M_WAITOK, MT_DATA);
1342 		req = mtod(m, struct cpl_smt_write_req *);
1343 		m->m_len = m->m_pkthdr.len = sizeof(*req);
1344 		memset(req, 0, sizeof(*req));
1345 		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1346 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
1347 		req->iff = i;
1348 		t3_mgmt_tx(adap, m);
1349 	}
1350 
1351 	for (i = 0; i < 2048; i++) {
1352 		struct cpl_l2t_write_req *req;
1353 
1354 		m = m_gethdr(M_WAITOK, MT_DATA);
1355 		req = mtod(m, struct cpl_l2t_write_req *);
1356 		m->m_len = m->m_pkthdr.len = sizeof(*req);
1357 		memset(req, 0, sizeof(*req));
1358 		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1359 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
1360 		req->params = htonl(V_L2T_W_IDX(i));
1361 		t3_mgmt_tx(adap, m);
1362 	}
1363 
1364 	for (i = 0; i < 2048; i++) {
1365 		struct cpl_rte_write_req *req;
1366 
1367 		m = m_gethdr(M_WAITOK, MT_DATA);
1368 		req = mtod(m, struct cpl_rte_write_req *);
1369 		m->m_len = m->m_pkthdr.len = sizeof(*req);
1370 		memset(req, 0, sizeof(*req));
1371 		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1372 		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
1373 		req->l2t_idx = htonl(V_L2T_W_IDX(i));
1374 		t3_mgmt_tx(adap, m);
1375 	}
1376 
1377 	m = m_gethdr(M_WAITOK, MT_DATA);
1378 	greq = mtod(m, struct cpl_set_tcb_field *);
1379 	m->m_len = m->m_pkthdr.len = sizeof(*greq);
1380 	memset(greq, 0, sizeof(*greq));
1381 	greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1382 	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
1383 	greq->mask = htobe64(1);
1384 	t3_mgmt_tx(adap, m);
1385 
1386 	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
1387 	t3_tp_set_offload_mode(adap, 0);
1388 	return (i);
1389 }
1390 
1391 /**
1392  *	setup_rss - configure Receive Side Steering (per-queue connection demux)
1393  *	@adap: the adapter
1394  *
1395  *	Sets up RSS to distribute packets to multiple receive queues.  We
1396  *	configure the RSS CPU lookup table to distribute to the number of HW
1397  *	receive queues, and the response queue lookup table to narrow that
1398  *	down to the response queues actually configured for each port.
1399  *	We always configure the RSS mapping for two ports since the mapping
1400  *	table has plenty of entries.
1401  */
1402 static void
1403 setup_rss(adapter_t *adap)
1404 {
1405 	int i;
1406 	u_int nq[2];
1407 	uint8_t cpus[SGE_QSETS + 1];
1408 	uint16_t rspq_map[RSS_TABLE_SIZE];
1409 
1410 	for (i = 0; i < SGE_QSETS; ++i)
1411 		cpus[i] = i;
1412 	cpus[SGE_QSETS] = 0xff;
1413 
1414 	nq[0] = nq[1] = 0;
1415 	for_each_port(adap, i) {
1416 		const struct port_info *pi = adap2pinfo(adap, i);
1417 
1418 		nq[pi->tx_chan] += pi->nqsets;
1419 	}
1420 	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
1421 		rspq_map[i] = nq[0] ? i % nq[0] : 0;
1422 		rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0;
1423 	}
1424 
1425 	/* Calculate the reverse RSS map table */
1426 	for (i = 0; i < SGE_QSETS; ++i)
1427 		adap->rrss_map[i] = 0xff;
1428 	for (i = 0; i < RSS_TABLE_SIZE; ++i)
1429 		if (adap->rrss_map[rspq_map[i]] == 0xff)
1430 			adap->rrss_map[rspq_map[i]] = i;
1431 
1432 	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
1433 		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
1434 	              F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
1435 	              cpus, rspq_map);
1436 
1437 }
1438 static void
1439 send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
1440 			      int hi, int port)
1441 {
1442 	struct mbuf *m;
1443 	struct mngt_pktsched_wr *req;
1444 
1445 	m = m_gethdr(M_NOWAIT, MT_DATA);
1446 	if (m) {
1447 		req = mtod(m, struct mngt_pktsched_wr *);
1448 		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1449 		req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1450 		req->sched = sched;
1451 		req->idx = qidx;
1452 		req->min = lo;
1453 		req->max = hi;
1454 		req->binding = port;
1455 		m->m_len = m->m_pkthdr.len = sizeof(*req);
1456 		t3_mgmt_tx(adap, m);
1457 	}
1458 }
1459 
1460 static void
1461 bind_qsets(adapter_t *sc)
1462 {
1463 	int i, j;
1464 
1465 	for (i = 0; i < (sc)->params.nports; ++i) {
1466 		const struct port_info *pi = adap2pinfo(sc, i);
1467 
1468 		for (j = 0; j < pi->nqsets; ++j) {
1469 			send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
1470 					  -1, pi->tx_chan);
1471 
1472 		}
1473 	}
1474 }
1475 
1476 static void
1477 update_tpeeprom(struct adapter *adap)
1478 {
1479 	const struct firmware *tpeeprom;
1480 
1481 	uint32_t version;
1482 	unsigned int major, minor;
1483 	int ret, len;
1484 	char rev, name[32];
1485 
1486 	t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);
1487 
1488 	major = G_TP_VERSION_MAJOR(version);
1489 	minor = G_TP_VERSION_MINOR(version);
1490 	if (major == TP_VERSION_MAJOR  && minor == TP_VERSION_MINOR)
1491 		return;
1492 
1493 	rev = t3rev2char(adap);
1494 	snprintf(name, sizeof(name), TPEEPROM_NAME, rev);
1495 
1496 	tpeeprom = firmware_get(name);
1497 	if (tpeeprom == NULL) {
1498 		device_printf(adap->dev,
1499 			      "could not load TP EEPROM: unable to load %s\n",
1500 			      name);
1501 		return;
1502 	}
1503 
1504 	len = tpeeprom->datasize - 4;
1505 
1506 	ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
1507 	if (ret)
1508 		goto release_tpeeprom;
1509 
1510 	if (len != TP_SRAM_LEN) {
1511 		device_printf(adap->dev,
1512 			      "%s length is wrong len=%d expected=%d\n", name,
1513 			      len, TP_SRAM_LEN);
1514 		return;
1515 	}
1516 
1517 	ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
1518 	    TP_SRAM_OFFSET);
1519 
1520 	if (!ret) {
1521 		device_printf(adap->dev,
1522 			"Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
1523 			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1524 	} else
1525 		device_printf(adap->dev,
1526 			      "Protocol SRAM image update in EEPROM failed\n");
1527 
1528 release_tpeeprom:
1529 	firmware_put(tpeeprom, FIRMWARE_UNLOAD);
1530 
1531 	return;
1532 }
1533 
1534 static int
1535 update_tpsram(struct adapter *adap)
1536 {
1537 	const struct firmware *tpsram;
1538 	int ret;
1539 	char rev, name[32];
1540 
1541 	rev = t3rev2char(adap);
1542 	snprintf(name, sizeof(name), TPSRAM_NAME, rev);
1543 
1544 	update_tpeeprom(adap);
1545 
1546 	tpsram = firmware_get(name);
1547 	if (tpsram == NULL){
1548 		device_printf(adap->dev, "could not load TP SRAM\n");
1549 		return (EINVAL);
1550 	} else
1551 		device_printf(adap->dev, "updating TP SRAM\n");
1552 
1553 	ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
1554 	if (ret)
1555 		goto release_tpsram;
1556 
1557 	ret = t3_set_proto_sram(adap, tpsram->data);
1558 	if (ret)
1559 		device_printf(adap->dev, "loading protocol SRAM failed\n");
1560 
1561 release_tpsram:
1562 	firmware_put(tpsram, FIRMWARE_UNLOAD);
1563 
1564 	return ret;
1565 }
1566 
1567 /**
1568  *	cxgb_up - enable the adapter
1569  *	@adap: adapter being enabled
1570  *
1571  *	Called when the first port is enabled, this function performs the
1572  *	actions necessary to make an adapter operational, such as completing
1573  *	the initialization of HW modules, and enabling interrupts.
1574  */
1575 static int
1576 cxgb_up(struct adapter *sc)
1577 {
1578 	int err = 0;
1579 	unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS;
1580 
1581 	KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)",
1582 					   __func__, sc->open_device_map));
1583 
1584 	if ((sc->flags & FULL_INIT_DONE) == 0) {
1585 
1586 		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1587 
1588 		if ((sc->flags & FW_UPTODATE) == 0)
1589 			if ((err = upgrade_fw(sc)))
1590 				goto out;
1591 
1592 		if ((sc->flags & TPS_UPTODATE) == 0)
1593 			if ((err = update_tpsram(sc)))
1594 				goto out;
1595 
1596 		if (is_offload(sc) && nfilters != 0) {
1597 			sc->params.mc5.nservers = 0;
1598 
1599 			if (nfilters < 0)
1600 				sc->params.mc5.nfilters = mxf;
1601 			else
1602 				sc->params.mc5.nfilters = min(nfilters, mxf);
1603 		}
1604 
1605 		err = t3_init_hw(sc, 0);
1606 		if (err)
1607 			goto out;
1608 
1609 		t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1610 		t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1611 
1612 		err = setup_sge_qsets(sc);
1613 		if (err)
1614 			goto out;
1615 
1616 		alloc_filters(sc);
1617 		setup_rss(sc);
1618 
1619 		t3_add_configured_sysctls(sc);
1620 		sc->flags |= FULL_INIT_DONE;
1621 	}
1622 
1623 	t3_intr_clear(sc);
1624 	t3_sge_start(sc);
1625 	t3_intr_enable(sc);
1626 
1627 	if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
1628 	    is_offload(sc) && init_tp_parity(sc) == 0)
1629 		sc->flags |= TP_PARITY_INIT;
1630 
1631 	if (sc->flags & TP_PARITY_INIT) {
1632 		t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR);
1633 		t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff);
1634 	}
1635 
1636 	if (!(sc->flags & QUEUES_BOUND)) {
1637 		bind_qsets(sc);
1638 		setup_hw_filters(sc);
1639 		sc->flags |= QUEUES_BOUND;
1640 	}
1641 
1642 	t3_sge_reset_adapter(sc);
1643 out:
1644 	return (err);
1645 }
1646 
1647 /*
1648  * Called when the last open device is closed.  Does NOT undo all of cxgb_up's
1649  * work.  Specifically, the resources grabbed under FULL_INIT_DONE are released
1650  * during controller_detach, not here.
1651  */
1652 static void
1653 cxgb_down(struct adapter *sc)
1654 {
1655 	t3_sge_stop(sc);
1656 	t3_intr_disable(sc);
1657 }
1658 
1659 /*
1660  * if_init for cxgb ports.
1661  */
1662 static void
1663 cxgb_init(void *arg)
1664 {
1665 	struct port_info *p = arg;
1666 	struct adapter *sc = p->adapter;
1667 
1668 	ADAPTER_LOCK(sc);
1669 	cxgb_init_locked(p); /* releases adapter lock */
1670 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1671 }
1672 
1673 static int
1674 cxgb_init_locked(struct port_info *p)
1675 {
1676 	struct adapter *sc = p->adapter;
1677 	if_t ifp = p->ifp;
1678 	struct cmac *mac = &p->mac;
1679 	int i, rc = 0, may_sleep = 0, gave_up_lock = 0;
1680 
1681 	ADAPTER_LOCK_ASSERT_OWNED(sc);
1682 
1683 	while (!IS_DOOMED(p) && IS_BUSY(sc)) {
1684 		gave_up_lock = 1;
1685 		if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) {
1686 			rc = EINTR;
1687 			goto done;
1688 		}
1689 	}
1690 	if (IS_DOOMED(p)) {
1691 		rc = ENXIO;
1692 		goto done;
1693 	}
1694 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
1695 
1696 	/*
1697 	 * The code that runs during one-time adapter initialization can sleep
1698 	 * so it's important not to hold any locks across it.
1699 	 */
1700 	may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1;
1701 
1702 	if (may_sleep) {
1703 		SET_BUSY(sc);
1704 		gave_up_lock = 1;
1705 		ADAPTER_UNLOCK(sc);
1706 	}
1707 
1708 	if (sc->open_device_map == 0 && ((rc = cxgb_up(sc)) != 0))
1709 			goto done;
1710 
1711 	PORT_LOCK(p);
1712 	if (isset(&sc->open_device_map, p->port_id) &&
1713 	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
1714 		PORT_UNLOCK(p);
1715 		goto done;
1716 	}
1717 	t3_port_intr_enable(sc, p->port_id);
1718 	if (!mac->multiport)
1719 		t3_mac_init(mac);
1720 	cxgb_update_mac_settings(p);
1721 	t3_link_start(&p->phy, mac, &p->link_config);
1722 	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
1723 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
1724 	PORT_UNLOCK(p);
1725 
1726 	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1727 		struct sge_qset *qs = &sc->sge.qs[i];
1728 		struct sge_txq *txq = &qs->txq[TXQ_ETH];
1729 
1730 		callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs,
1731 				 txq->txq_watchdog.c_cpu);
1732 	}
1733 
1734 	/* all ok */
1735 	setbit(&sc->open_device_map, p->port_id);
1736 	callout_reset(&p->link_check_ch,
1737 	    p->phy.caps & SUPPORTED_LINK_IRQ ?  hz * 3 : hz / 4,
1738 	    link_check_callout, p);
1739 
1740 done:
1741 	if (may_sleep) {
1742 		ADAPTER_LOCK(sc);
1743 		KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
1744 		CLR_BUSY(sc);
1745 	}
1746 	if (gave_up_lock)
1747 		wakeup_one(&sc->flags);
1748 	ADAPTER_UNLOCK(sc);
1749 	return (rc);
1750 }
1751 
1752 static int
1753 cxgb_uninit_locked(struct port_info *p)
1754 {
1755 	struct adapter *sc = p->adapter;
1756 	int rc;
1757 
1758 	ADAPTER_LOCK_ASSERT_OWNED(sc);
1759 
1760 	while (!IS_DOOMED(p) && IS_BUSY(sc)) {
1761 		if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) {
1762 			rc = EINTR;
1763 			goto done;
1764 		}
1765 	}
1766 	if (IS_DOOMED(p)) {
1767 		rc = ENXIO;
1768 		goto done;
1769 	}
1770 	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
1771 	SET_BUSY(sc);
1772 	ADAPTER_UNLOCK(sc);
1773 
1774 	rc = cxgb_uninit_synchronized(p);
1775 
1776 	ADAPTER_LOCK(sc);
1777 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
1778 	CLR_BUSY(sc);
1779 	wakeup_one(&sc->flags);
1780 done:
1781 	ADAPTER_UNLOCK(sc);
1782 	return (rc);
1783 }
1784 
1785 /*
1786  * Called on "ifconfig down", and from port_detach
1787  */
1788 static int
1789 cxgb_uninit_synchronized(struct port_info *pi)
1790 {
1791 	struct adapter *sc = pi->adapter;
1792 	if_t ifp = pi->ifp;
1793 
1794 	/*
1795 	 * taskqueue_drain may cause a deadlock if the adapter lock is held.
1796 	 */
1797 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1798 
1799 	/*
1800 	 * Clear this port's bit from the open device map, and then drain all
1801 	 * the tasks that can access/manipulate this port's port_info or ifp.
1802 	 * We disable this port's interrupts here and so the slow/ext
1803 	 * interrupt tasks won't be enqueued.  The tick task will continue to
1804 	 * be enqueued every second but the runs after this drain will not see
1805 	 * this port in the open device map.
1806 	 *
1807 	 * A well-behaved task must take open_device_map into account and ignore
1808 	 * ports that are not open.
1809 	 */
1810 	clrbit(&sc->open_device_map, pi->port_id);
1811 	t3_port_intr_disable(sc, pi->port_id);
1812 	taskqueue_drain(sc->tq, &sc->slow_intr_task);
1813 	taskqueue_drain(sc->tq, &sc->tick_task);
1814 
1815 	callout_drain(&pi->link_check_ch);
1816 	taskqueue_drain(sc->tq, &pi->link_check_task);
1817 
1818 	PORT_LOCK(pi);
1819 	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1820 
1821 	/* disable pause frames */
1822 	t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0);
1823 
1824 	/* Reset RX FIFO HWM */
1825 	t3_set_reg_field(sc, A_XGM_RXFIFO_CFG +  pi->mac.offset,
1826 			 V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);
1827 
1828 	DELAY(100 * 1000);
1829 
1830 	/* Wait for TXFIFO empty */
1831 	t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset,
1832 			F_TXFIFO_EMPTY, 1, 20, 5);
1833 
1834 	DELAY(100 * 1000);
1835 	t3_mac_disable(&pi->mac, MAC_DIRECTION_RX);
1836 
1837 	pi->phy.ops->power_down(&pi->phy, 1);
1838 
1839 	PORT_UNLOCK(pi);
1840 
1841 	pi->link_config.link_ok = 0;
1842 	t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0);
1843 
1844 	if (sc->open_device_map == 0)
1845 		cxgb_down(pi->adapter);
1846 
1847 	return (0);
1848 }
1849 
1850 /*
1851  * Mark lro enabled or disabled in all qsets for this port
1852  */
1853 static int
1854 cxgb_set_lro(struct port_info *p, int enabled)
1855 {
1856 	int i;
1857 	struct adapter *adp = p->adapter;
1858 	struct sge_qset *q;
1859 
1860 	for (i = 0; i < p->nqsets; i++) {
1861 		q = &adp->sge.qs[p->first_qset + i];
1862 		q->lro.enabled = (enabled != 0);
1863 	}
1864 	return (0);
1865 }
1866 
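/*
 * Interface ioctl handler.  Most commands take the adapter lock up front and
 * bail out through the shared "fail" label (which unlocks) on ENXIO/EBUSY.
 * SIOCSIFFLAGS may instead hand the lock to cxgb_init_locked() or
 * cxgb_uninit_locked(), both of which return with it released.
 */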
1867 static int
1868 cxgb_ioctl(if_t ifp, unsigned long command, caddr_t data)
1869 {
1870 	struct port_info *p = if_getsoftc(ifp);
1871 	struct adapter *sc = p->adapter;
1872 	struct ifreq *ifr = (struct ifreq *)data;
1873 	int flags, error = 0, mtu;
1874 	uint32_t mask;
1875 
1876 	switch (command) {
1877 	case SIOCSIFMTU:
1878 		ADAPTER_LOCK(sc);
1879 		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
1880 		if (error) {
1881 fail:
1882 			ADAPTER_UNLOCK(sc);
1883 			return (error);
1884 		}
1885 
1886 		mtu = ifr->ifr_mtu;
1887 		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
1888 			error = EINVAL;
1889 		} else {
1890 			if_setmtu(ifp, mtu);
1891 			PORT_LOCK(p);
1892 			cxgb_update_mac_settings(p);
1893 			PORT_UNLOCK(p);
1894 		}
1895 		ADAPTER_UNLOCK(sc);
1896 		break;
1897 	case SIOCSIFFLAGS:
1898 		ADAPTER_LOCK(sc);
1899 		if (IS_DOOMED(p)) {
1900 			error = ENXIO;
1901 			goto fail;
1902 		}
1903 		if (if_getflags(ifp) & IFF_UP) {
1904 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1905 				flags = p->if_flags;
1906 				if (((if_getflags(ifp) ^ flags) & IFF_PROMISC) ||
1907 				    ((if_getflags(ifp) ^ flags) & IFF_ALLMULTI)) {
1908 					if (IS_BUSY(sc)) {
1909 						error = EBUSY;
1910 						goto fail;
1911 					}
1912 					PORT_LOCK(p);
1913 					cxgb_update_mac_settings(p);
1914 					PORT_UNLOCK(p);
1915 				}
1916 				ADAPTER_UNLOCK(sc);
1917 			} else
1918 				error = cxgb_init_locked(p);
1919 			p->if_flags = if_getflags(ifp);
1920 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1921 			error = cxgb_uninit_locked(p);
1922 		else
1923 			ADAPTER_UNLOCK(sc);
1924 
1925 		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1926 		break;
1927 	case SIOCADDMULTI:
1928 	case SIOCDELMULTI:
1929 		ADAPTER_LOCK(sc);
1930 		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
1931 		if (error)
1932 			goto fail;
1933 
1934 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1935 			PORT_LOCK(p);
1936 			cxgb_update_mac_settings(p);
1937 			PORT_UNLOCK(p);
1938 		}
1939 		ADAPTER_UNLOCK(sc);
1940 
1941 		break;
1942 	case SIOCSIFCAP:
1943 		ADAPTER_LOCK(sc);
1944 		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
1945 		if (error)
1946 			goto fail;
1947 
1948 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
1949 		if (mask & IFCAP_TXCSUM) {
1950 			if_togglecapenable(ifp, IFCAP_TXCSUM);
1951 			if_togglehwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP);
1952 
1953 			if (IFCAP_TSO4 & if_getcapenable(ifp) &&
1954 			    !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
1955 				mask &= ~IFCAP_TSO4;
1956 				if_setcapenablebit(ifp, 0, IFCAP_TSO4);
1957 				if_printf(ifp,
1958 				    "tso4 disabled due to -txcsum.\n");
1959 			}
1960 		}
1961 		if (mask & IFCAP_TXCSUM_IPV6) {
1962 			if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
1963 			if_togglehwassist(ifp, CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1964 
1965 			if (IFCAP_TSO6 & if_getcapenable(ifp) &&
1966 			    !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
1967 				mask &= ~IFCAP_TSO6;
1968 				if_setcapenablebit(ifp, 0, IFCAP_TSO6);
1969 				if_printf(ifp,
1970 				    "tso6 disabled due to -txcsum6.\n");
1971 			}
1972 		}
1973 		if (mask & IFCAP_RXCSUM)
1974 			if_togglecapenable(ifp, IFCAP_RXCSUM);
1975 		if (mask & IFCAP_RXCSUM_IPV6)
1976 			if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);
1977 
1978 		/*
1979 		 * Note that we leave CSUM_TSO alone (it is always set).  The
1980 		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1981 		 * sending a TSO request our way, so it's sufficient to toggle
1982 		 * IFCAP_TSOx only.
1983 		 */
1984 		if (mask & IFCAP_TSO4) {
1985 			if (!(IFCAP_TSO4 & if_getcapenable(ifp)) &&
1986 			    !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
1987 				if_printf(ifp, "enable txcsum first.\n");
1988 				error = EAGAIN;
1989 				goto fail;
1990 			}
1991 			if_togglecapenable(ifp, IFCAP_TSO4);
1992 		}
1993 		if (mask & IFCAP_TSO6) {
1994 			if (!(IFCAP_TSO6 & if_getcapenable(ifp)) &&
1995 			    !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
1996 				if_printf(ifp, "enable txcsum6 first.\n");
1997 				error = EAGAIN;
1998 				goto fail;
1999 			}
2000 			if_togglecapenable(ifp, IFCAP_TSO6);
2001 		}
2002 		if (mask & IFCAP_LRO) {
2003 			if_togglecapenable(ifp, IFCAP_LRO);
2004 
2005 			/* Safe to do this even if cxgb_up not called yet */
2006 			cxgb_set_lro(p, if_getcapenable(ifp) & IFCAP_LRO);
2007 		}
2008 #ifdef TCP_OFFLOAD
2009 		if (mask & IFCAP_TOE4) {
2010 			int enable = (if_getcapenable(ifp) ^ mask) & IFCAP_TOE4;
2011 
2012 			error = toe_capability(p, enable);
2013 			if (error == 0)
2014 				if_togglecapenable(ifp, mask);
2015 		}
2016 #endif
2017 		if (mask & IFCAP_VLAN_HWTAGGING) {
2018 			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
2019 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2020 				PORT_LOCK(p);
2021 				cxgb_update_mac_settings(p);
2022 				PORT_UNLOCK(p);
2023 			}
2024 		}
2025 		if (mask & IFCAP_VLAN_MTU) {
2026 			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
2027 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2028 				PORT_LOCK(p);
2029 				cxgb_update_mac_settings(p);
2030 				PORT_UNLOCK(p);
2031 			}
2032 		}
2033 		if (mask & IFCAP_VLAN_HWTSO)
2034 			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
2035 		if (mask & IFCAP_VLAN_HWCSUM)
2036 			if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
2037 
2038 #ifdef VLAN_CAPABILITIES
2039 		VLAN_CAPABILITIES(ifp);
2040 #endif
2041 		ADAPTER_UNLOCK(sc);
2042 		break;
2043 	case SIOCSIFMEDIA:
2044 	case SIOCGIFMEDIA:
2045 		error = ifmedia_ioctl(ifp, ifr, &p->media, command);
2046 		break;
2047 	default:
2048 		error = ether_ioctl(ifp, command, data);
2049 	}
2050 
2051 	return (error);
2052 }
2053 
2054 static int
2055 cxgb_media_change(if_t ifp)
2056 {
2057 	return (EOPNOTSUPP);
2058 }
2059 
2060 /*
2061  * Translates phy->modtype to the correct Ethernet media subtype.
2062  */
2063 static int
2064 cxgb_ifm_type(int mod)
2065 {
2066 	switch (mod) {
2067 	case phy_modtype_sr:
2068 		return (IFM_10G_SR);
2069 	case phy_modtype_lr:
2070 		return (IFM_10G_LR);
2071 	case phy_modtype_lrm:
2072 		return (IFM_10G_LRM);
2073 	case phy_modtype_twinax:
2074 		return (IFM_10G_TWINAX);
2075 	case phy_modtype_twinax_long:
2076 		return (IFM_10G_TWINAX_LONG);
2077 	case phy_modtype_none:
2078 		return (IFM_NONE);
2079 	case phy_modtype_unknown:
2080 		return (IFM_UNKNOWN);
2081 	}
2082 
2083 	KASSERT(0, ("%s: modtype %d unknown", __func__, mod));
2084 	return (IFM_UNKNOWN);
2085 }
2086 
2087 /*
2088  * Rebuilds the ifmedia list for this port, and sets the current media.
2089  */
2090 static void
2091 cxgb_build_medialist(struct port_info *p)
2092 {
2093 	struct cphy *phy = &p->phy;
2094 	struct ifmedia *media = &p->media;
2095 	int mod = phy->modtype;
2096 	int m = IFM_ETHER | IFM_FDX;
2097 
2098 	PORT_LOCK(p);
2099 
2100 	ifmedia_removeall(media);
2101 	if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) {
2102 		/* Copper (RJ45) */
2103 
2104 		if (phy->caps & SUPPORTED_10000baseT_Full)
2105 			ifmedia_add(media, m | IFM_10G_T, mod, NULL);
2106 
2107 		if (phy->caps & SUPPORTED_1000baseT_Full)
2108 			ifmedia_add(media, m | IFM_1000_T, mod, NULL);
2109 
2110 		if (phy->caps & SUPPORTED_100baseT_Full)
2111 			ifmedia_add(media, m | IFM_100_TX, mod, NULL);
2112 
2113 		if (phy->caps & SUPPORTED_10baseT_Full)
2114 			ifmedia_add(media, m | IFM_10_T, mod, NULL);
2115 
2116 		ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL);
2117 		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2118 
2119 	} else if (phy->caps & SUPPORTED_TP) {
2120 		/* Copper (CX4) */
2121 
2122 		KASSERT(phy->caps & SUPPORTED_10000baseT_Full,
2123 			("%s: unexpected cap 0x%x", __func__, phy->caps));
2124 
2125 		ifmedia_add(media, m | IFM_10G_CX4, mod, NULL);
2126 		ifmedia_set(media, m | IFM_10G_CX4);
2127 
2128 	} else if (phy->caps & SUPPORTED_FIBRE &&
2129 		   phy->caps & SUPPORTED_10000baseT_Full) {
2130 		/* 10G optical (but includes SFP+ twinax) */
2131 
2132 		m |= cxgb_ifm_type(mod);
2133 		if (IFM_SUBTYPE(m) == IFM_NONE)
2134 			m &= ~IFM_FDX;
2135 
2136 		ifmedia_add(media, m, mod, NULL);
2137 		ifmedia_set(media, m);
2138 
2139 	} else if (phy->caps & SUPPORTED_FIBRE &&
2140 		   phy->caps & SUPPORTED_1000baseT_Full) {
2141 		/* 1G optical */
2142 
2143 		/* XXX: Lie and claim to be SX, could actually be any 1G-X */
2144 		ifmedia_add(media, m | IFM_1000_SX, mod, NULL);
2145 		ifmedia_set(media, m | IFM_1000_SX);
2146 
2147 	} else {
2148 		KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__,
2149 			    phy->caps));
2150 	}
2151 
2152 	PORT_UNLOCK(p);
2153 }
2154 
2155 static void
2156 cxgb_media_status(if_t ifp, struct ifmediareq *ifmr)
2157 {
2158 	struct port_info *p = if_getsoftc(ifp);
2159 	struct ifmedia_entry *cur = p->media.ifm_cur;
2160 	int speed = p->link_config.speed;
2161 
2162 	if (cur->ifm_data != p->phy.modtype) {
2163 		cxgb_build_medialist(p);
2164 		cur = p->media.ifm_cur;
2165 	}
2166 
2167 	ifmr->ifm_status = IFM_AVALID;
2168 	if (!p->link_config.link_ok)
2169 		return;
2170 
2171 	ifmr->ifm_status |= IFM_ACTIVE;
2172 
2173 	/*
2174 	 * active and current will differ iff current media is autoselect.  That
2175 	 * can happen only for copper RJ45.
2176 	 */
2177 	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
2178 		return;
2179 	KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
2180 		("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));
2181 
2182 	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
2183 	if (speed == SPEED_10000)
2184 		ifmr->ifm_active |= IFM_10G_T;
2185 	else if (speed == SPEED_1000)
2186 		ifmr->ifm_active |= IFM_1000_T;
2187 	else if (speed == SPEED_100)
2188 		ifmr->ifm_active |= IFM_100_TX;
2189 	else if (speed == SPEED_10)
2190 		ifmr->ifm_active |= IFM_10_T;
2191 	else
2192 		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
2193 			    speed));
2194 }
2195 
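/*
 * if_get_counter method.  MAC statistics are refreshed first (rate limited
 * by cxgb_refresh_stats) and most counters map directly onto mac_stats;
 * output-queue drops are summed from each tx queue's buf_ring once the
 * adapter has been fully initialized.
 */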
2196 static uint64_t
2197 cxgb_get_counter(if_t ifp, ift_counter c)
2198 {
2199 	struct port_info *pi = if_getsoftc(ifp);
2200 	struct adapter *sc = pi->adapter;
2201 	struct cmac *mac = &pi->mac;
2202 	struct mac_stats *mstats = &mac->stats;
2203 
2204 	cxgb_refresh_stats(pi);
2205 
2206 	switch (c) {
2207 	case IFCOUNTER_IPACKETS:
2208 		return (mstats->rx_frames);
2209 
2210 	case IFCOUNTER_IERRORS:
2211 		return (mstats->rx_jabber + mstats->rx_data_errs +
2212 		    mstats->rx_sequence_errs + mstats->rx_runt +
2213 		    mstats->rx_too_long + mstats->rx_mac_internal_errs +
2214 		    mstats->rx_short + mstats->rx_fcs_errs);
2215 
2216 	case IFCOUNTER_OPACKETS:
2217 		return (mstats->tx_frames);
2218 
2219 	case IFCOUNTER_OERRORS:
2220 		return (mstats->tx_excess_collisions + mstats->tx_underrun +
2221 		    mstats->tx_len_errs + mstats->tx_mac_internal_errs +
2222 		    mstats->tx_excess_deferral + mstats->tx_fcs_errs);
2223 
2224 	case IFCOUNTER_COLLISIONS:
2225 		return (mstats->tx_total_collisions);
2226 
2227 	case IFCOUNTER_IBYTES:
2228 		return (mstats->rx_octets);
2229 
2230 	case IFCOUNTER_OBYTES:
2231 		return (mstats->tx_octets);
2232 
2233 	case IFCOUNTER_IMCASTS:
2234 		return (mstats->rx_mcast_frames);
2235 
2236 	case IFCOUNTER_OMCASTS:
2237 		return (mstats->tx_mcast_frames);
2238 
2239 	case IFCOUNTER_IQDROPS:
2240 		return (mstats->rx_cong_drops);
2241 
2242 	case IFCOUNTER_OQDROPS: {
2243 		int i;
2244 		uint64_t drops;
2245 
2246 		drops = 0;
2247 		if (sc->flags & FULL_INIT_DONE) {
2248 			for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
2249 				drops += sc->sge.qs[i].txq[TXQ_ETH].txq_mr->br_drops;
2250 		}
2251 
2252 		return (drops);
2253 
2254 	}
2255 
2256 	default:
2257 		return (if_get_counter_default(ifp, c));
2258 	}
2259 }
2260 
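/*
 * Slow-path interrupt handler: mask further PL interrupts (the read-back
 * flushes the posted write) and defer the actual work to the slow_intr task.
 */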
2261 static void
2262 cxgb_async_intr(void *data)
2263 {
2264 	adapter_t *sc = data;
2265 
2266 	t3_write_reg(sc, A_PL_INT_ENABLE0, 0);
2267 	(void) t3_read_reg(sc, A_PL_INT_ENABLE0);
2268 	taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
2269 }
2270 
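/*
 * Link state polling.  The callout (also kicked by t3_os_link_intr below)
 * defers the PHY poll to the link_check task, which keeps rescheduling
 * itself while the link is down, faulted, or the PHY cannot interrupt on
 * link changes.
 */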
2271 static void
2272 link_check_callout(void *arg)
2273 {
2274 	struct port_info *pi = arg;
2275 	struct adapter *sc = pi->adapter;
2276 
2277 	if (!isset(&sc->open_device_map, pi->port_id))
2278 		return;
2279 
2280 	taskqueue_enqueue(sc->tq, &pi->link_check_task);
2281 }
2282 
2283 static void
2284 check_link_status(void *arg, int pending)
2285 {
2286 	struct port_info *pi = arg;
2287 	struct adapter *sc = pi->adapter;
2288 
2289 	if (!isset(&sc->open_device_map, pi->port_id))
2290 		return;
2291 
2292 	t3_link_changed(sc, pi->port_id);
2293 
2294 	if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ) ||
2295 	    pi->link_config.link_ok == 0)
2296 		callout_reset(&pi->link_check_ch, hz, link_check_callout, pi);
2297 }
2298 
2299 void
2300 t3_os_link_intr(struct port_info *pi)
2301 {
2302 	/*
2303 	 * Schedule a link check in the near future.  If the link is flapping
2304 	 * rapidly we'll keep resetting the callout and delaying the check until
2305 	 * things stabilize a bit.
2306 	 */
2307 	callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi);
2308 }
2309 
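/*
 * T3B2 MAC watchdog, run from the tick task.  Closed, faulted, or link-down
 * ports are skipped; otherwise the watchdog may toggle or reset the MAC, in
 * which case the port is reprogrammed and the corresponding counter bumped.
 */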
2310 static void
2311 check_t3b2_mac(struct adapter *sc)
2312 {
2313 	int i;
2314 
2315 	if (sc->flags & CXGB_SHUTDOWN)
2316 		return;
2317 
2318 	for_each_port(sc, i) {
2319 		struct port_info *p = &sc->port[i];
2320 		int status;
2321 #ifdef INVARIANTS
2322 		if_t ifp = p->ifp;
2323 #endif
2324 
2325 		if (!isset(&sc->open_device_map, p->port_id) || p->link_fault ||
2326 		    !p->link_config.link_ok)
2327 			continue;
2328 
2329 		KASSERT(if_getdrvflags(ifp) & IFF_DRV_RUNNING,
2330 			("%s: state mismatch (drv_flags %x, device_map %x)",
2331 			 __func__, if_getdrvflags(ifp), sc->open_device_map));
2332 
2333 		PORT_LOCK(p);
2334 		status = t3b2_mac_watchdog_task(&p->mac);
2335 		if (status == 1)
2336 			p->mac.stats.num_toggled++;
2337 		else if (status == 2) {
2338 			struct cmac *mac = &p->mac;
2339 
2340 			cxgb_update_mac_settings(p);
2341 			t3_link_start(&p->phy, mac, &p->link_config);
2342 			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2343 			t3_port_intr_enable(sc, p->port_id);
2344 			p->mac.stats.num_resets++;
2345 		}
2346 		PORT_UNLOCK(p);
2347 	}
2348 }
2349 
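/*
 * Once-a-second housekeeping.  The callout only enqueues the tick task; the
 * handler below accounts for starved response queues and empty free lists,
 * runs the T3B2 MAC watchdog where applicable, refreshes per-port MAC stats,
 * and counts rx FIFO overflows.
 */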
2350 static void
2351 cxgb_tick(void *arg)
2352 {
2353 	adapter_t *sc = (adapter_t *)arg;
2354 
2355 	if (sc->flags & CXGB_SHUTDOWN)
2356 		return;
2357 
2358 	taskqueue_enqueue(sc->tq, &sc->tick_task);
2359 	callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
2360 }
2361 
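/*
 * Pull MAC statistics from the hardware, at most once every 250ms per port.
 * Called from both the tick task and the if_get_counter path.
 */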
2362 void
2363 cxgb_refresh_stats(struct port_info *pi)
2364 {
2365 	struct timeval tv;
2366 	const struct timeval interval = {0, 250000};    /* 250ms */
2367 
2368 	getmicrotime(&tv);
2369 	timevalsub(&tv, &interval);
2370 	if (timevalcmp(&tv, &pi->last_refreshed, <))
2371 		return;
2372 
2373 	PORT_LOCK(pi);
2374 	t3_mac_update_stats(&pi->mac);
2375 	PORT_UNLOCK(pi);
2376 	getmicrotime(&pi->last_refreshed);
2377 }
2378 
2379 static void
2380 cxgb_tick_handler(void *arg, int count)
2381 {
2382 	adapter_t *sc = (adapter_t *)arg;
2383 	const struct adapter_params *p = &sc->params;
2384 	int i;
2385 	uint32_t cause, reset;
2386 
2387 	if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE))
2388 		return;
2389 
2390 	if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map)
2391 		check_t3b2_mac(sc);
2392 
2393 	cause = t3_read_reg(sc, A_SG_INT_CAUSE) & (F_RSPQSTARVE | F_FLEMPTY);
2394 	if (cause) {
2395 		struct sge_qset *qs = &sc->sge.qs[0];
2396 		uint32_t mask, v;
2397 
2398 		v = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) & ~0xff00;
2399 
2400 		mask = 1;
2401 		for (i = 0; i < SGE_QSETS; i++) {
2402 			if (v & mask)
2403 				qs[i].rspq.starved++;
2404 			mask <<= 1;
2405 		}
2406 
2407 		mask <<= SGE_QSETS; /* skip RSPQXDISABLED */
2408 
2409 		for (i = 0; i < SGE_QSETS * 2; i++) {
2410 			if (v & mask) {
2411 				qs[i / 2].fl[i % 2].empty++;
2412 			}
2413 			mask <<= 1;
2414 		}
2415 
2416 		/* clear */
2417 		t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, v);
2418 		t3_write_reg(sc, A_SG_INT_CAUSE, cause);
2419 	}
2420 
2421 	for (i = 0; i < sc->params.nports; i++) {
2422 		struct port_info *pi = &sc->port[i];
2423 		struct cmac *mac = &pi->mac;
2424 
2425 		if (!isset(&sc->open_device_map, pi->port_id))
2426 			continue;
2427 
2428 		cxgb_refresh_stats(pi);
2429 
2430 		if (mac->multiport)
2431 			continue;
2432 
2433 		/* Count rx fifo overflows, once per second */
2434 		cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset);
2435 		reset = 0;
2436 		if (cause & F_RXFIFO_OVERFLOW) {
2437 			mac->stats.rx_fifo_ovfl++;
2438 			reset |= F_RXFIFO_OVERFLOW;
2439 		}
2440 		t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset);
2441 	}
2442 }
2443 
2444 static void
2445 touch_bars(device_t dev)
2446 {
2447 	/*
2448 	 * Don't enable yet
2449 	 */
2450 #if !defined(__LP64__) && 0
2451 	u32 v;
2452 
2453 	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
2454 	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
2455 	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
2456 	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
2457 	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
2458 	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
2459 #endif
2460 }
2461 
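/*
 * Write a byte range to the serial EEPROM.  The range is widened to 32-bit
 * alignment; for unaligned requests the boundary words are read back and
 * merged so neighbouring bytes are preserved, then everything is written one
 * word at a time with write-protect lifted around the update.
 */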
2462 static int
2463 set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
2464 {
2465 	uint8_t *buf;
2466 	int err = 0;
2467 	u32 aligned_offset, aligned_len, *p;
2468 	struct adapter *adapter = pi->adapter;
2469 
2470 
2471 	aligned_offset = offset & ~3;
2472 	aligned_len = (len + (offset & 3) + 3) & ~3;
2473 
2474 	if (aligned_offset != offset || aligned_len != len) {
2475 		buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
2476 		if (!buf)
2477 			return (ENOMEM);
2478 		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
2479 		if (!err && aligned_len > 4)
2480 			err = t3_seeprom_read(adapter,
2481 					      aligned_offset + aligned_len - 4,
2482 					      (u32 *)&buf[aligned_len - 4]);
2483 		if (err)
2484 			goto out;
2485 		memcpy(buf + (offset & 3), data, len);
2486 	} else
2487 		buf = (uint8_t *)(uintptr_t)data;
2488 
2489 	err = t3_seeprom_wp(adapter, 0);
2490 	if (err)
2491 		goto out;
2492 
2493 	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2494 		err = t3_seeprom_write(adapter, aligned_offset, *p);
2495 		aligned_offset += 4;
2496 	}
2497 
2498 	if (!err)
2499 		err = t3_seeprom_wp(adapter, 1);
2500 out:
2501 	if (buf != data)
2502 		free(buf, M_DEVBUF);
2503 	return err;
2504 }
2505 
2506 
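/*
 * Range check used by the ioctls below; a negative value means the parameter
 * was not supplied and is accepted as-is.
 */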
2507 static int
2508 in_range(int val, int lo, int hi)
2509 {
2510 	return val < 0 || (val <= hi && val >= lo);
2511 }
2512 
2513 static int
2514 cxgb_extension_open(struct cdev *dev, int flags, int fmp, struct thread *td)
2515 {
2516        return (0);
2517 }
2518 
2519 static int
2520 cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2521 {
2522        return (0);
2523 }
2524 
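/*
 * Control-device ioctls (cxgbtool).  Privileged access is required up front;
 * each command validates its own arguments, and many are available only on
 * offload-capable adapters or after full initialization.
 */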
2525 static int
2526 cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
2527     int fflag, struct thread *td)
2528 {
2529 	int mmd, error = 0;
2530 	struct port_info *pi = dev->si_drv1;
2531 	adapter_t *sc = pi->adapter;
2532 
2533 #ifdef PRIV_SUPPORTED
2534 	if (priv_check(td, PRIV_DRIVER)) {
2535 		if (cxgb_debug)
2536 			printf("user does not have access to privileged ioctls\n");
2537 		return (EPERM);
2538 	}
2539 #else
2540 	if (suser(td)) {
2541 		if (cxgb_debug)
2542 			printf("user does not have access to privileged ioctls\n");
2543 		return (EPERM);
2544 	}
2545 #endif
2546 
2547 	switch (cmd) {
2548 	case CHELSIO_GET_MIIREG: {
2549 		uint32_t val;
2550 		struct cphy *phy = &pi->phy;
2551 		struct ch_mii_data *mid = (struct ch_mii_data *)data;
2552 
2553 		if (!phy->mdio_read)
2554 			return (EOPNOTSUPP);
2555 		if (is_10G(sc)) {
2556 			mmd = mid->phy_id >> 8;
2557 			if (!mmd)
2558 				mmd = MDIO_DEV_PCS;
2559 			else if (mmd > MDIO_DEV_VEND2)
2560 				return (EINVAL);
2561 
2562 			error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
2563 					     mid->reg_num, &val);
2564 		} else
2565 		        error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
2566 					     mid->reg_num & 0x1f, &val);
2567 		if (error == 0)
2568 			mid->val_out = val;
2569 		break;
2570 	}
2571 	case CHELSIO_SET_MIIREG: {
2572 		struct cphy *phy = &pi->phy;
2573 		struct ch_mii_data *mid = (struct ch_mii_data *)data;
2574 
2575 		if (!phy->mdio_write)
2576 			return (EOPNOTSUPP);
2577 		if (is_10G(sc)) {
2578 			mmd = mid->phy_id >> 8;
2579 			if (!mmd)
2580 				mmd = MDIO_DEV_PCS;
2581 			else if (mmd > MDIO_DEV_VEND2)
2582 				return (EINVAL);
2583 
2584 			error = phy->mdio_write(sc, mid->phy_id & 0x1f,
2585 					      mmd, mid->reg_num, mid->val_in);
2586 		} else
2587 			error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
2588 					      mid->reg_num & 0x1f,
2589 					      mid->val_in);
2590 		break;
2591 	}
2592 	case CHELSIO_SETREG: {
2593 		struct ch_reg *edata = (struct ch_reg *)data;
2594 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2595 			return (EFAULT);
2596 		t3_write_reg(sc, edata->addr, edata->val);
2597 		break;
2598 	}
2599 	case CHELSIO_GETREG: {
2600 		struct ch_reg *edata = (struct ch_reg *)data;
2601 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2602 			return (EFAULT);
2603 		edata->val = t3_read_reg(sc, edata->addr);
2604 		break;
2605 	}
2606 	case CHELSIO_GET_SGE_CONTEXT: {
2607 		struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
2608 		mtx_lock_spin(&sc->sge.reg_lock);
2609 		switch (ecntxt->cntxt_type) {
2610 		case CNTXT_TYPE_EGRESS:
2611 			error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
2612 			    ecntxt->data);
2613 			break;
2614 		case CNTXT_TYPE_FL:
2615 			error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
2616 			    ecntxt->data);
2617 			break;
2618 		case CNTXT_TYPE_RSP:
2619 			error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
2620 			    ecntxt->data);
2621 			break;
2622 		case CNTXT_TYPE_CQ:
2623 			error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
2624 			    ecntxt->data);
2625 			break;
2626 		default:
2627 			error = EINVAL;
2628 			break;
2629 		}
2630 		mtx_unlock_spin(&sc->sge.reg_lock);
2631 		break;
2632 	}
2633 	case CHELSIO_GET_SGE_DESC: {
2634 		struct ch_desc *edesc = (struct ch_desc *)data;
2635 		int ret;
2636 		if (edesc->queue_num >= SGE_QSETS * 6)
2637 			return (EINVAL);
2638 		ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
2639 		    edesc->queue_num % 6, edesc->idx, edesc->data);
2640 		if (ret < 0)
2641 			return (EINVAL);
2642 		edesc->size = ret;
2643 		break;
2644 	}
2645 	case CHELSIO_GET_QSET_PARAMS: {
2646 		struct qset_params *q;
2647 		struct ch_qset_params *t = (struct ch_qset_params *)data;
2648 		int q1 = pi->first_qset;
2649 		int nqsets = pi->nqsets;
2650 		int i;
2651 
2652 		if (t->qset_idx >= nqsets)
2653 			return EINVAL;
2654 
2655 		i = q1 + t->qset_idx;
2656 		q = &sc->params.sge.qset[i];
2657 		t->rspq_size   = q->rspq_size;
2658 		t->txq_size[0] = q->txq_size[0];
2659 		t->txq_size[1] = q->txq_size[1];
2660 		t->txq_size[2] = q->txq_size[2];
2661 		t->fl_size[0]  = q->fl_size;
2662 		t->fl_size[1]  = q->jumbo_size;
2663 		t->polling     = q->polling;
2664 		t->lro         = q->lro;
2665 		t->intr_lat    = q->coalesce_usecs;
2666 		t->cong_thres  = q->cong_thres;
2667 		t->qnum        = i;
2668 
2669 		if ((sc->flags & FULL_INIT_DONE) == 0)
2670 			t->vector = 0;
2671 		else if (sc->flags & USING_MSIX)
2672 			t->vector = rman_get_start(sc->msix_irq_res[i]);
2673 		else
2674 			t->vector = rman_get_start(sc->irq_res);
2675 
2676 		break;
2677 	}
2678 	case CHELSIO_GET_QSET_NUM: {
2679 		struct ch_reg *edata = (struct ch_reg *)data;
2680 		edata->val = pi->nqsets;
2681 		break;
2682 	}
2683 	case CHELSIO_LOAD_FW: {
2684 		uint8_t *fw_data;
2685 		uint32_t vers;
2686 		struct ch_mem_range *t = (struct ch_mem_range *)data;
2687 
2688 		/*
2689 		 * You're allowed to load a firmware only before FULL_INIT_DONE
2690 		 *
2691 		 * FW_UPTODATE is also set so the rest of the initialization
2692 		 * will not overwrite what was loaded here.  This gives you the
2693 		 * flexibility to load any firmware (and maybe shoot yourself in
2694 		 * the foot).
2695 		 */
2696 
2697 		ADAPTER_LOCK(sc);
2698 		if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
2699 			ADAPTER_UNLOCK(sc);
2700 			return (EBUSY);
2701 		}
2702 
2703 		fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
2704 		if (!fw_data)
2705 			error = ENOMEM;
2706 		else
2707 			error = copyin(t->buf, fw_data, t->len);
2708 
2709 		if (!error)
2710 			error = -t3_load_fw(sc, fw_data, t->len);
2711 
2712 		if (t3_get_fw_version(sc, &vers) == 0) {
2713 			snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
2714 			    "%d.%d.%d", G_FW_VERSION_MAJOR(vers),
2715 			    G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers));
2716 		}
2717 
2718 		if (!error)
2719 			sc->flags |= FW_UPTODATE;
2720 
2721 		free(fw_data, M_DEVBUF);
2722 		ADAPTER_UNLOCK(sc);
2723 		break;
2724 	}
2725 	case CHELSIO_LOAD_BOOT: {
2726 		uint8_t *boot_data;
2727 		struct ch_mem_range *t = (struct ch_mem_range *)data;
2728 
2729 		boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
2730 		if (!boot_data)
2731 			return ENOMEM;
2732 
2733 		error = copyin(t->buf, boot_data, t->len);
2734 		if (!error)
2735 			error = -t3_load_boot(sc, boot_data, t->len);
2736 
2737 		free(boot_data, M_DEVBUF);
2738 		break;
2739 	}
2740 	case CHELSIO_GET_PM: {
2741 		struct ch_pm *m = (struct ch_pm *)data;
2742 		struct tp_params *p = &sc->params.tp;
2743 
2744 		if (!is_offload(sc))
2745 			return (EOPNOTSUPP);
2746 
2747 		m->tx_pg_sz = p->tx_pg_size;
2748 		m->tx_num_pg = p->tx_num_pgs;
2749 		m->rx_pg_sz  = p->rx_pg_size;
2750 		m->rx_num_pg = p->rx_num_pgs;
2751 		m->pm_total  = p->pmtx_size + p->chan_rx_size * p->nchan;
2752 
2753 		break;
2754 	}
2755 	case CHELSIO_SET_PM: {
2756 		struct ch_pm *m = (struct ch_pm *)data;
2757 		struct tp_params *p = &sc->params.tp;
2758 
2759 		if (!is_offload(sc))
2760 			return (EOPNOTSUPP);
2761 		if (sc->flags & FULL_INIT_DONE)
2762 			return (EBUSY);
2763 
2764 		if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
2765 		    !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
2766 			return (EINVAL);	/* not power of 2 */
2767 		if (!(m->rx_pg_sz & 0x14000))
2768 			return (EINVAL);	/* not 16KB or 64KB */
2769 		if (!(m->tx_pg_sz & 0x1554000))
2770 			return (EINVAL);
2771 		if (m->tx_num_pg == -1)
2772 			m->tx_num_pg = p->tx_num_pgs;
2773 		if (m->rx_num_pg == -1)
2774 			m->rx_num_pg = p->rx_num_pgs;
2775 		if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
2776 			return (EINVAL);
2777 		if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
2778 		    m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
2779 			return (EINVAL);
2780 
2781 		p->rx_pg_size = m->rx_pg_sz;
2782 		p->tx_pg_size = m->tx_pg_sz;
2783 		p->rx_num_pgs = m->rx_num_pg;
2784 		p->tx_num_pgs = m->tx_num_pg;
2785 		break;
2786 	}
2787 	case CHELSIO_SETMTUTAB: {
2788 		struct ch_mtus *m = (struct ch_mtus *)data;
2789 		int i;
2790 
2791 		if (!is_offload(sc))
2792 			return (EOPNOTSUPP);
2793 		if (offload_running(sc))
2794 			return (EBUSY);
2795 		if (m->nmtus != NMTUS)
2796 			return (EINVAL);
2797 		if (m->mtus[0] < 81)         /* accommodate SACK */
2798 			return (EINVAL);
2799 
2800 		/*
2801 		 * MTUs must be in ascending order
2802 		 */
2803 		for (i = 1; i < NMTUS; ++i)
2804 			if (m->mtus[i] < m->mtus[i - 1])
2805 				return (EINVAL);
2806 
2807 		memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
2808 		break;
2809 	}
2810 	case CHELSIO_GETMTUTAB: {
2811 		struct ch_mtus *m = (struct ch_mtus *)data;
2812 
2813 		if (!is_offload(sc))
2814 			return (EOPNOTSUPP);
2815 
2816 		memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
2817 		m->nmtus = NMTUS;
2818 		break;
2819 	}
2820 	case CHELSIO_GET_MEM: {
2821 		struct ch_mem_range *t = (struct ch_mem_range *)data;
2822 		struct mc7 *mem;
2823 		uint8_t *useraddr;
2824 		u64 buf[32];
2825 
2826 		/*
2827 		 * Use these to avoid modifying len/addr in the return
2828 		 * struct
2829 		 */
2830 		uint32_t len = t->len, addr = t->addr;
2831 
2832 		if (!is_offload(sc))
2833 			return (EOPNOTSUPP);
2834 		if (!(sc->flags & FULL_INIT_DONE))
2835 			return (EIO);         /* need the memory controllers */
2836 		if ((addr & 0x7) || (len & 0x7))
2837 			return (EINVAL);
2838 		if (t->mem_id == MEM_CM)
2839 			mem = &sc->cm;
2840 		else if (t->mem_id == MEM_PMRX)
2841 			mem = &sc->pmrx;
2842 		else if (t->mem_id == MEM_PMTX)
2843 			mem = &sc->pmtx;
2844 		else
2845 			return (EINVAL);
2846 
2847 		/*
2848 		 * Version scheme:
2849 		 * bits 0..9: chip version
2850 		 * bits 10..15: chip revision
2851 		 */
2852 		t->version = 3 | (sc->params.rev << 10);
2853 
2854 		/*
2855 		 * Read 256 bytes at a time as len can be large and we don't
2856 		 * want to use huge intermediate buffers.
2857 		 */
2858 		useraddr = (uint8_t *)t->buf;
2859 		while (len) {
2860 			unsigned int chunk = min(len, sizeof(buf));
2861 
2862 			error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf);
2863 			if (error)
2864 				return (-error);
2865 			if (copyout(buf, useraddr, chunk))
2866 				return (EFAULT);
2867 			useraddr += chunk;
2868 			addr += chunk;
2869 			len -= chunk;
2870 		}
2871 		break;
2872 	}
2873 	case CHELSIO_READ_TCAM_WORD: {
2874 		struct ch_tcam_word *t = (struct ch_tcam_word *)data;
2875 
2876 		if (!is_offload(sc))
2877 			return (EOPNOTSUPP);
2878 		if (!(sc->flags & FULL_INIT_DONE))
2879 			return (EIO);         /* need MC5 */
2880 		return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
2881 		break;
2882 	}
2883 	case CHELSIO_SET_TRACE_FILTER: {
2884 		struct ch_trace *t = (struct ch_trace *)data;
2885 		const struct trace_params *tp;
2886 
2887 		tp = (const struct trace_params *)&t->sip;
2888 		if (t->config_tx)
2889 			t3_config_trace_filter(sc, tp, 0, t->invert_match,
2890 					       t->trace_tx);
2891 		if (t->config_rx)
2892 			t3_config_trace_filter(sc, tp, 1, t->invert_match,
2893 					       t->trace_rx);
2894 		break;
2895 	}
2896 	case CHELSIO_SET_PKTSCHED: {
2897 		struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
2898 		if (sc->open_device_map == 0)
2899 			return (EAGAIN);
2900 		send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
2901 		    p->binding);
2902 		break;
2903 	}
2904 	case CHELSIO_IFCONF_GETREGS: {
2905 		struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data;
2906 		int reglen = cxgb_get_regs_len();
2907 		uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT);
2908 		if (buf == NULL) {
2909 			return (ENOMEM);
2910 		}
2911 		if (regs->len > reglen)
2912 			regs->len = reglen;
2913 		else if (regs->len < reglen)
2914 			error = ENOBUFS;
2915 
2916 		if (!error) {
2917 			cxgb_get_regs(sc, regs, buf);
2918 			error = copyout(buf, regs->data, reglen);
2919 		}
2920 		free(buf, M_DEVBUF);
2921 
2922 		break;
2923 	}
2924 	case CHELSIO_SET_HW_SCHED: {
2925 		struct ch_hw_sched *t = (struct ch_hw_sched *)data;
2926 		unsigned int ticks_per_usec = core_ticks_per_usec(sc);
2927 
2928 		if ((sc->flags & FULL_INIT_DONE) == 0)
2929 			return (EAGAIN);       /* need TP to be initialized */
2930 		if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
2931 		    !in_range(t->channel, 0, 1) ||
2932 		    !in_range(t->kbps, 0, 10000000) ||
2933 		    !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
2934 		    !in_range(t->flow_ipg, 0,
2935 			      dack_ticks_to_usec(sc, 0x7ff)))
2936 			return (EINVAL);
2937 
2938 		if (t->kbps >= 0) {
2939 			error = t3_config_sched(sc, t->kbps, t->sched);
2940 			if (error < 0)
2941 				return (-error);
2942 		}
2943 		if (t->class_ipg >= 0)
2944 			t3_set_sched_ipg(sc, t->sched, t->class_ipg);
2945 		if (t->flow_ipg >= 0) {
2946 			t->flow_ipg *= 1000;     /* us -> ns */
2947 			t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
2948 		}
2949 		if (t->mode >= 0) {
2950 			int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
2951 
2952 			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2953 					 bit, t->mode ? bit : 0);
2954 		}
2955 		if (t->channel >= 0)
2956 			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2957 					 1 << t->sched, t->channel << t->sched);
2958 		break;
2959 	}
2960 	case CHELSIO_GET_EEPROM: {
2961 		int i;
2962 		struct ch_eeprom *e = (struct ch_eeprom *)data;
2963 		uint8_t *buf;
2964 
2965 		if (e->offset & 3 || e->offset >= EEPROMSIZE ||
2966 		    e->len > EEPROMSIZE || e->offset + e->len > EEPROMSIZE) {
2967 			return (EINVAL);
2968 		}
2969 
2970 		buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT);
2971 		if (buf == NULL) {
2972 			return (ENOMEM);
2973 		}
2974 		e->magic = EEPROM_MAGIC;
2975 		for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
2976 			error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);
2977 
2978 		if (!error)
2979 			error = copyout(buf + e->offset, e->data, e->len);
2980 
2981 		free(buf, M_DEVBUF);
2982 		break;
2983 	}
2984 	case CHELSIO_CLEAR_STATS: {
2985 		if (!(sc->flags & FULL_INIT_DONE))
2986 			return EAGAIN;
2987 
2988 		PORT_LOCK(pi);
2989 		t3_mac_update_stats(&pi->mac);
2990 		memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
2991 		PORT_UNLOCK(pi);
2992 		break;
2993 	}
2994 	case CHELSIO_GET_UP_LA: {
2995 		struct ch_up_la *la = (struct ch_up_la *)data;
2996 		uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT);
2997 		if (buf == NULL) {
2998 			return (ENOMEM);
2999 		}
3000 		if (la->bufsize < LA_BUFSIZE)
3001 			error = ENOBUFS;
3002 
3003 		if (!error)
3004 			error = -t3_get_up_la(sc, &la->stopped, &la->idx,
3005 					      &la->bufsize, buf);
3006 		if (!error)
3007 			error = copyout(buf, la->data, la->bufsize);
3008 
3009 		free(buf, M_DEVBUF);
3010 		break;
3011 	}
3012 	case CHELSIO_GET_UP_IOQS: {
3013 		struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data;
3014 		uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT);
3015 		uint32_t *v;
3016 
3017 		if (buf == NULL) {
3018 			return (ENOMEM);
3019 		}
3020 		if (ioqs->bufsize < IOQS_BUFSIZE)
3021 			error = ENOBUFS;
3022 
3023 		if (!error)
3024 			error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf);
3025 
3026 		if (!error) {
3027 			v = (uint32_t *)buf;
3028 
3029 			ioqs->ioq_rx_enable = *v++;
3030 			ioqs->ioq_tx_enable = *v++;
3031 			ioqs->ioq_rx_status = *v++;
3032 			ioqs->ioq_tx_status = *v++;
3033 
3034 			error = copyout(v, ioqs->data, ioqs->bufsize);
3035 		}
3036 
3037 		free(buf, M_DEVBUF);
3038 		break;
3039 	}
3040 	case CHELSIO_SET_FILTER: {
3041 		struct ch_filter *f = (struct ch_filter *)data;
3042 		struct filter_info *p;
3043 		unsigned int nfilters = sc->params.mc5.nfilters;
3044 
3045 		if (!is_offload(sc))
3046 			return (EOPNOTSUPP);	/* No TCAM */
3047 		if (!(sc->flags & FULL_INIT_DONE))
3048 			return (EAGAIN);	/* mc5 not setup yet */
3049 		if (nfilters == 0)
3050 			return (EBUSY);		/* TOE will use TCAM */
3051 
3052 		/* sanity checks */
3053 		if (f->filter_id >= nfilters ||
3054 		    (f->val.dip && f->mask.dip != 0xffffffff) ||
3055 		    (f->val.sport && f->mask.sport != 0xffff) ||
3056 		    (f->val.dport && f->mask.dport != 0xffff) ||
3057 		    (f->val.vlan && f->mask.vlan != 0xfff) ||
3058 		    (f->val.vlan_prio &&
3059 			f->mask.vlan_prio != FILTER_NO_VLAN_PRI) ||
3060 		    (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) ||
3061 		    f->qset >= SGE_QSETS ||
3062 		    sc->rrss_map[f->qset] >= RSS_TABLE_SIZE)
3063 			return (EINVAL);
3064 
3065 		/* Was allocated with M_WAITOK */
3066 		KASSERT(sc->filters, ("filter table NULL\n"));
3067 
3068 		p = &sc->filters[f->filter_id];
3069 		if (p->locked)
3070 			return (EPERM);
3071 
3072 		bzero(p, sizeof(*p));
3073 		p->sip = f->val.sip;
3074 		p->sip_mask = f->mask.sip;
3075 		p->dip = f->val.dip;
3076 		p->sport = f->val.sport;
3077 		p->dport = f->val.dport;
3078 		p->vlan = f->mask.vlan ? f->val.vlan : 0xfff;
3079 		p->vlan_prio = f->mask.vlan_prio ? (f->val.vlan_prio & 6) :
3080 		    FILTER_NO_VLAN_PRI;
3081 		p->mac_hit = f->mac_hit;
3082 		p->mac_vld = f->mac_addr_idx != 0xffff;
3083 		p->mac_idx = f->mac_addr_idx;
3084 		p->pkt_type = f->proto;
3085 		p->report_filter_id = f->want_filter_id;
3086 		p->pass = f->pass;
3087 		p->rss = f->rss;
3088 		p->qset = f->qset;
3089 
3090 		error = set_filter(sc, f->filter_id, p);
3091 		if (error == 0)
3092 			p->valid = 1;
3093 		break;
3094 	}
3095 	case CHELSIO_DEL_FILTER: {
3096 		struct ch_filter *f = (struct ch_filter *)data;
3097 		struct filter_info *p;
3098 		unsigned int nfilters = sc->params.mc5.nfilters;
3099 
3100 		if (!is_offload(sc))
3101 			return (EOPNOTSUPP);
3102 		if (!(sc->flags & FULL_INIT_DONE))
3103 			return (EAGAIN);
3104 		if (nfilters == 0 || sc->filters == NULL)
3105 			return (EINVAL);
3106 		if (f->filter_id >= nfilters)
3107 		       return (EINVAL);
3108 
3109 		p = &sc->filters[f->filter_id];
3110 		if (p->locked)
3111 			return (EPERM);
3112 		if (!p->valid)
3113 			return (EFAULT); /* Read "Bad address" as "Bad index" */
3114 
3115 		bzero(p, sizeof(*p));
3116 		p->sip = p->sip_mask = 0xffffffff;
3117 		p->vlan = 0xfff;
3118 		p->vlan_prio = FILTER_NO_VLAN_PRI;
3119 		p->pkt_type = 1;
3120 		error = set_filter(sc, f->filter_id, p);
3121 		break;
3122 	}
3123 	case CHELSIO_GET_FILTER: {
3124 		struct ch_filter *f = (struct ch_filter *)data;
3125 		struct filter_info *p;
3126 		unsigned int i, nfilters = sc->params.mc5.nfilters;
3127 
3128 		if (!is_offload(sc))
3129 			return (EOPNOTSUPP);
3130 		if (!(sc->flags & FULL_INIT_DONE))
3131 			return (EAGAIN);
3132 		if (nfilters == 0 || sc->filters == NULL)
3133 			return (EINVAL);
3134 
3135 		i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1;
3136 		for (; i < nfilters; i++) {
3137 			p = &sc->filters[i];
3138 			if (!p->valid)
3139 				continue;
3140 
3141 			bzero(f, sizeof(*f));
3142 
3143 			f->filter_id = i;
3144 			f->val.sip = p->sip;
3145 			f->mask.sip = p->sip_mask;
3146 			f->val.dip = p->dip;
3147 			f->mask.dip = p->dip ? 0xffffffff : 0;
3148 			f->val.sport = p->sport;
3149 			f->mask.sport = p->sport ? 0xffff : 0;
3150 			f->val.dport = p->dport;
3151 			f->mask.dport = p->dport ? 0xffff : 0;
3152 			f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan;
3153 			f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff;
3154 			f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
3155 			    0 : p->vlan_prio;
3156 			f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
3157 			    0 : FILTER_NO_VLAN_PRI;
3158 			f->mac_hit = p->mac_hit;
3159 			f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff;
3160 			f->proto = p->pkt_type;
3161 			f->want_filter_id = p->report_filter_id;
3162 			f->pass = p->pass;
3163 			f->rss = p->rss;
3164 			f->qset = p->qset;
3165 
3166 			break;
3167 		}
3168 
3169 		if (i == nfilters)
3170 			f->filter_id = 0xffffffff;
3171 		break;
3172 	}
3173 	default:
3174 		return (EOPNOTSUPP);
3175 		break;
3176 	}
3177 
3178 	return (error);
3179 }
3180 
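/*
 * Copy a contiguous register range into the dump buffer, storing each
 * register at the offset matching its address.
 */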
3181 static __inline void
3182 reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
3183     unsigned int end)
3184 {
3185 	uint32_t *p = (uint32_t *)(buf + start);
3186 
3187 	for ( ; start <= end; start += sizeof(uint32_t))
3188 		*p++ = t3_read_reg(ap, start);
3189 }
3190 
3191 #define T3_REGMAP_SIZE (3 * 1024)
3192 static int
3193 cxgb_get_regs_len(void)
3194 {
3195 	return T3_REGMAP_SIZE;
3196 }
3197 
3198 static void
3199 cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf)
3200 {
3201 
3202 	/*
3203 	 * Version scheme:
3204 	 * bits 0..9: chip version
3205 	 * bits 10..15: chip revision
3206 	 * bit 31: set for PCIe cards
3207 	 */
3208 	regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);
3209 
3210 	/*
3211 	 * We skip the MAC statistics registers because they are clear-on-read.
3212 	 * Also reading multi-register stats would need to synchronize with the
3213 	 * periodic mac stats accumulation.  Hard to justify the complexity.
3214 	 */
3215 	memset(buf, 0, cxgb_get_regs_len());
3216 	reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
3217 	reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
3218 	reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
3219 	reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
3220 	reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
3221 	reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
3222 		       XGM_REG(A_XGM_SERDES_STAT3, 1));
3223 	reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
3224 		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
3225 }
3226 
3227 static int
3228 alloc_filters(struct adapter *sc)
3229 {
3230 	struct filter_info *p;
3231 	unsigned int nfilters = sc->params.mc5.nfilters;
3232 
3233 	if (nfilters == 0)
3234 		return (0);
3235 
3236 	p = malloc(sizeof(*p) * nfilters, M_DEVBUF, M_WAITOK | M_ZERO);
3237 	sc->filters = p;
3238 
3239 	p = &sc->filters[nfilters - 1];
3240 	p->vlan = 0xfff;
3241 	p->vlan_prio = FILTER_NO_VLAN_PRI;
3242 	p->pass = p->rss = p->valid = p->locked = 1;
3243 
3244 	return (0);
3245 }
3246 
3247 static int
3248 setup_hw_filters(struct adapter *sc)
3249 {
3250 	int i, rc;
3251 	unsigned int nfilters = sc->params.mc5.nfilters;
3252 
3253 	if (!sc->filters)
3254 		return (0);
3255 
3256 	t3_enable_filters(sc);
3257 
3258 	for (i = rc = 0; i < nfilters && !rc; i++) {
3259 		if (sc->filters[i].locked)
3260 			rc = set_filter(sc, i, &sc->filters[i]);
3261 	}
3262 
3263 	return (rc);
3264 }
3265 
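/*
 * Program one TCAM filter.  A single management mbuf carries a bypass work
 * request holding a CPL_PASS_OPEN_REQ (the match criteria) followed by two
 * CPL_SET_TCB_FIELD updates; filters that pass traffic to a specific queue
 * (pass && !rss) send one more message to select the queue from rrss_map.
 */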
3266 static int
3267 set_filter(struct adapter *sc, int id, const struct filter_info *f)
3268 {
3269 	int len;
3270 	struct mbuf *m;
3271 	struct ulp_txpkt *txpkt;
3272 	struct work_request_hdr *wr;
3273 	struct cpl_pass_open_req *oreq;
3274 	struct cpl_set_tcb_field *sreq;
3275 
3276 	len = sizeof(*wr) + sizeof(*oreq) + 2 * sizeof(*sreq);
3277 	KASSERT(len <= MHLEN, ("filter request too big for an mbuf"));
3278 
3279 	id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes -
3280 	      sc->params.mc5.nfilters;
3281 
3282 	m = m_gethdr(M_WAITOK, MT_DATA);
3283 	m->m_len = m->m_pkthdr.len = len;
3284 	bzero(mtod(m, char *), len);
3285 
3286 	wr = mtod(m, struct work_request_hdr *);
3287 	wr->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC);
3288 
3289 	oreq = (struct cpl_pass_open_req *)(wr + 1);
3290 	txpkt = (struct ulp_txpkt *)oreq;
3291 	txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
3292 	txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8));
3293 	OPCODE_TID(oreq) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, id));
3294 	oreq->local_port = htons(f->dport);
3295 	oreq->peer_port = htons(f->sport);
3296 	oreq->local_ip = htonl(f->dip);
3297 	oreq->peer_ip = htonl(f->sip);
3298 	oreq->peer_netmask = htonl(f->sip_mask);
3299 	oreq->opt0h = 0;
3300 	oreq->opt0l = htonl(F_NO_OFFLOAD);
3301 	oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) |
3302 			 V_CONN_POLICY(CPL_CONN_POLICY_FILTER) |
3303 			 V_VLAN_PRI(f->vlan_prio >> 1) |
3304 			 V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) |
3305 			 V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) |
3306 			 V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4)));
3307 
3308 	sreq = (struct cpl_set_tcb_field *)(oreq + 1);
3309 	set_tcb_field_ulp(sreq, id, 1, 0x1800808000ULL,
3310 			  (f->report_filter_id << 15) | (1 << 23) |
3311 			  ((u64)f->pass << 35) | ((u64)!f->rss << 36));
3312 	set_tcb_field_ulp(sreq + 1, id, 0, 0xffffffff, (2 << 19) | 1);
3313 	t3_mgmt_tx(sc, m);
3314 
3315 	if (f->pass && !f->rss) {
3316 		len = sizeof(*sreq);
3317 		m = m_gethdr(M_WAITOK, MT_DATA);
3318 		m->m_len = m->m_pkthdr.len = len;
3319 		bzero(mtod(m, char *), len);
3320 		sreq = mtod(m, struct cpl_set_tcb_field *);
3321 		sreq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
3322 		mk_set_tcb_field(sreq, id, 25, 0x3f80000,
3323 				 (u64)sc->rrss_map[f->qset] << 19);
3324 		t3_mgmt_tx(sc, m);
3325 	}
3326 	return 0;
3327 }
3328 
3329 static inline void
3330 mk_set_tcb_field(struct cpl_set_tcb_field *req, unsigned int tid,
3331     unsigned int word, u64 mask, u64 val)
3332 {
3333 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
3334 	req->reply = V_NO_REPLY(1);
3335 	req->cpu_idx = 0;
3336 	req->word = htons(word);
3337 	req->mask = htobe64(mask);
3338 	req->val = htobe64(val);
3339 }
3340 
3341 static inline void
3342 set_tcb_field_ulp(struct cpl_set_tcb_field *req, unsigned int tid,
3343     unsigned int word, u64 mask, u64 val)
3344 {
3345 	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
3346 
3347 	txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
3348 	txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8));
3349 	mk_set_tcb_field(req, tid, word, mask, val);
3350 }
3351 
3352 void
3353 t3_iterate(void (*func)(struct adapter *, void *), void *arg)
3354 {
3355 	struct adapter *sc;
3356 
3357 	mtx_lock(&t3_list_lock);
3358 	SLIST_FOREACH(sc, &t3_list, link) {
3359 		/*
3360 		 * func should not make any assumptions about what state sc is
3361 		 * in - the only guarantee is that sc->sc_lock is a valid lock.
3362 		 */
3363 		func(sc, arg);
3364 	}
3365 	mtx_unlock(&t3_list_lock);
3366 }
3367 
3368 #ifdef TCP_OFFLOAD
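/*
 * Enable or disable TOE on a port.  The first port to enable TOE activates
 * the TOM upper-layer driver (and, per the XXX below, iWARP); disabling only
 * clears the port's bit in offload_map.
 */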
3369 static int
3370 toe_capability(struct port_info *pi, int enable)
3371 {
3372 	int rc;
3373 	struct adapter *sc = pi->adapter;
3374 
3375 	ADAPTER_LOCK_ASSERT_OWNED(sc);
3376 
3377 	if (!is_offload(sc))
3378 		return (ENODEV);
3379 
3380 	if (enable) {
3381 		if (!(sc->flags & FULL_INIT_DONE)) {
3382 			log(LOG_WARNING,
3383 			    "You must enable a cxgb interface first\n");
3384 			return (EAGAIN);
3385 		}
3386 
3387 		if (isset(&sc->offload_map, pi->port_id))
3388 			return (0);
3389 
3390 		if (!(sc->flags & TOM_INIT_DONE)) {
3391 			rc = t3_activate_uld(sc, ULD_TOM);
3392 			if (rc == EAGAIN) {
3393 				log(LOG_WARNING,
3394 				    "You must kldload t3_tom.ko before trying "
3395 				    "to enable TOE on a cxgb interface.\n");
3396 			}
3397 			if (rc != 0)
3398 				return (rc);
3399 			KASSERT(sc->tom_softc != NULL,
3400 			    ("%s: TOM activated but softc NULL", __func__));
3401 			KASSERT(sc->flags & TOM_INIT_DONE,
3402 			    ("%s: TOM activated but flag not set", __func__));
3403 		}
3404 
3405 		setbit(&sc->offload_map, pi->port_id);
3406 
3407 		/*
3408 		 * XXX: Temporary code to allow iWARP to be enabled when TOE is
3409 		 * enabled on any port.  Need to figure out how to enable,
3410 		 * disable, load, and unload iWARP cleanly.
3411 		 */
3412 		if (!isset(&sc->offload_map, MAX_NPORTS) &&
3413 		    t3_activate_uld(sc, ULD_IWARP) == 0)
3414 			setbit(&sc->offload_map, MAX_NPORTS);
3415 	} else {
3416 		if (!isset(&sc->offload_map, pi->port_id))
3417 			return (0);
3418 
3419 		KASSERT(sc->flags & TOM_INIT_DONE,
3420 		    ("%s: TOM never initialized?", __func__));
3421 		clrbit(&sc->offload_map, pi->port_id);
3422 	}
3423 
3424 	return (0);
3425 }
3426 
3427 /*
3428  * Add an upper layer driver to the global list.
3429  */
3430 int
3431 t3_register_uld(struct uld_info *ui)
3432 {
3433 	int rc = 0;
3434 	struct uld_info *u;
3435 
3436 	mtx_lock(&t3_uld_list_lock);
3437 	SLIST_FOREACH(u, &t3_uld_list, link) {
3438 	    if (u->uld_id == ui->uld_id) {
3439 		    rc = EEXIST;
3440 		    goto done;
3441 	    }
3442 	}
3443 
3444 	SLIST_INSERT_HEAD(&t3_uld_list, ui, link);
3445 	ui->refcount = 0;
3446 done:
3447 	mtx_unlock(&t3_uld_list_lock);
3448 	return (rc);
3449 }
3450 
3451 int
3452 t3_unregister_uld(struct uld_info *ui)
3453 {
3454 	int rc = EINVAL;
3455 	struct uld_info *u;
3456 
3457 	mtx_lock(&t3_uld_list_lock);
3458 
3459 	SLIST_FOREACH(u, &t3_uld_list, link) {
3460 	    if (u == ui) {
3461 		    if (ui->refcount > 0) {
3462 			    rc = EBUSY;
3463 			    goto done;
3464 		    }
3465 
3466 		    SLIST_REMOVE(&t3_uld_list, ui, uld_info, link);
3467 		    rc = 0;
3468 		    goto done;
3469 	    }
3470 	}
3471 done:
3472 	mtx_unlock(&t3_uld_list_lock);
3473 	return (rc);
3474 }
3475 
3476 int
3477 t3_activate_uld(struct adapter *sc, int id)
3478 {
3479 	int rc = EAGAIN;
3480 	struct uld_info *ui;
3481 
3482 	mtx_lock(&t3_uld_list_lock);
3483 
3484 	SLIST_FOREACH(ui, &t3_uld_list, link) {
3485 		if (ui->uld_id == id) {
3486 			rc = ui->activate(sc);
3487 			if (rc == 0)
3488 				ui->refcount++;
3489 			goto done;
3490 		}
3491 	}
3492 done:
3493 	mtx_unlock(&t3_uld_list_lock);
3494 
3495 	return (rc);
3496 }
3497 
3498 int
3499 t3_deactivate_uld(struct adapter *sc, int id)
3500 {
3501 	int rc = EINVAL;
3502 	struct uld_info *ui;
3503 
3504 	mtx_lock(&t3_uld_list_lock);
3505 
3506 	SLIST_FOREACH(ui, &t3_uld_list, link) {
3507 		if (ui->uld_id == id) {
3508 			rc = ui->deactivate(sc);
3509 			if (rc == 0)
3510 				ui->refcount--;
3511 			goto done;
3512 		}
3513 	}
3514 done:
3515 	mtx_unlock(&t3_uld_list_lock);
3516 
3517 	return (rc);
3518 }
3519 
3520 static int
3521 cpl_not_handled(struct sge_qset *qs __unused, struct rsp_desc *r __unused,
3522     struct mbuf *m)
3523 {
3524 	m_freem(m);
3525 	return (EDOOFUS);
3526 }
3527 
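/*
 * Install (or, with h == NULL, reset) the handler for a CPL opcode.  The
 * pointer is updated atomically so rx processing never sees a NULL handler;
 * unclaimed opcodes fall back to cpl_not_handled above.
 */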
3528 int
3529 t3_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
3530 {
3531 	uintptr_t *loc, new;
3532 
3533 	if (opcode >= NUM_CPL_HANDLERS)
3534 		return (EINVAL);
3535 
3536 	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
3537 	loc = (uintptr_t *) &sc->cpl_handler[opcode];
3538 	atomic_store_rel_ptr(loc, new);
3539 
3540 	return (0);
3541 }
3542 #endif
3543 
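/*
 * Module load/unload hook for the core driver: set up or tear down the
 * global adapter and ULD lists, refusing to unload while either list is
 * non-empty.
 */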
3544 static int
3545 cxgbc_mod_event(module_t mod, int cmd, void *arg)
3546 {
3547 	int rc = 0;
3548 
3549 	switch (cmd) {
3550 	case MOD_LOAD:
3551 		mtx_init(&t3_list_lock, "T3 adapters", 0, MTX_DEF);
3552 		SLIST_INIT(&t3_list);
3553 #ifdef TCP_OFFLOAD
3554 		mtx_init(&t3_uld_list_lock, "T3 ULDs", 0, MTX_DEF);
3555 		SLIST_INIT(&t3_uld_list);
3556 #endif
3557 		break;
3558 
3559 	case MOD_UNLOAD:
3560 #ifdef TCP_OFFLOAD
3561 		mtx_lock(&t3_uld_list_lock);
3562 		if (!SLIST_EMPTY(&t3_uld_list)) {
3563 			rc = EBUSY;
3564 			mtx_unlock(&t3_uld_list_lock);
3565 			break;
3566 		}
3567 		mtx_unlock(&t3_uld_list_lock);
3568 		mtx_destroy(&t3_uld_list_lock);
3569 #endif
3570 		mtx_lock(&t3_list_lock);
3571 		if (!SLIST_EMPTY(&t3_list)) {
3572 			rc = EBUSY;
3573 			mtx_unlock(&t3_list_lock);
3574 			break;
3575 		}
3576 		mtx_unlock(&t3_list_lock);
3577 		mtx_destroy(&t3_list_lock);
3578 		break;
3579 	}
3580 
3581 	return (rc);
3582 }
3583 
3584 #ifdef DEBUGNET
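/*
 * Debugnet (netdump) support: report queue and cluster sizing, reset the
 * free-list zones and disable LRO when debugnet takes over, and provide
 * polled transmit/receive entry points that bypass the normal ifnet paths.
 */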
3585 static void
3586 cxgb_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
3587 {
3588 	struct port_info *pi;
3589 	adapter_t *adap;
3590 
3591 	pi = if_getsoftc(ifp);
3592 	adap = pi->adapter;
3593 	ADAPTER_LOCK(adap);
3594 	*nrxr = adap->nqsets;
3595 	*ncl = adap->sge.qs[0].fl[1].size;
3596 	*clsize = adap->sge.qs[0].fl[1].buf_size;
3597 	ADAPTER_UNLOCK(adap);
3598 }
3599 
3600 static void
3601 cxgb_debugnet_event(if_t ifp, enum debugnet_ev event)
3602 {
3603 	struct port_info *pi;
3604 	struct sge_qset *qs;
3605 	int i;
3606 
3607 	pi = if_getsoftc(ifp);
3608 	if (event == DEBUGNET_START)
3609 		for (i = 0; i < pi->adapter->nqsets; i++) {
3610 			qs = &pi->adapter->sge.qs[i];
3611 
3612 			/* Need to reinit after debugnet_mbuf_start(). */
3613 			qs->fl[0].zone = zone_pack;
3614 			qs->fl[1].zone = zone_clust;
3615 			qs->lro.enabled = 0;
3616 		}
3617 }
3618 
3619 static int
3620 cxgb_debugnet_transmit(if_t ifp, struct mbuf *m)
3621 {
3622 	struct port_info *pi;
3623 	struct sge_qset *qs;
3624 
3625 	pi = if_getsoftc(ifp);
3626 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
3627 	    IFF_DRV_RUNNING)
3628 		return (ENOENT);
3629 
3630 	qs = &pi->adapter->sge.qs[pi->first_qset];
3631 	return (cxgb_debugnet_encap(qs, &m));
3632 }
3633 
3634 static int
3635 cxgb_debugnet_poll(if_t ifp, int count)
3636 {
3637 	struct port_info *pi;
3638 	adapter_t *adap;
3639 	int i;
3640 
3641 	pi = if_getsoftc(ifp);
3642 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
3643 		return (ENOENT);
3644 
3645 	adap = pi->adapter;
3646 	for (i = 0; i < adap->nqsets; i++)
3647 		(void)cxgb_debugnet_poll_rx(adap, &adap->sge.qs[i]);
3648 	(void)cxgb_debugnet_poll_tx(&adap->sge.qs[pi->first_qset]);
3649 	return (0);
3650 }
3651 #endif /* DEBUGNET */
3652