/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * This file is part of the Chelsio T4 Ethernet driver.
 *
 * Copyright (C) 2003-2013 Chelsio Communications.  All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
 * release for licensing terms and conditions.
 */
#include <sys/queue.h>
#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "t4fw_interface.h"
#include "t4_fw.h"

/*
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int
t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
    int polarity, int attempts, int delay, u32 *valp)
{
	int rc = 0;

	/* LINTED: E_CONSTANT_CONDITION */
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp != NULL)
				*valp = val;
			goto done;
		}
		if (--attempts == 0) {
			rc = -EAGAIN;
			goto done;
		}
		if (delay != 0)
			udelay(delay);
	}

done:
	return (rc);
}
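
/*
 * Illustrative sketch of a t4_wait_op_done_val() caller; the register and
 * field names below are placeholders, not real T4 definitions.  Poll a
 * DONE bit until it reads as 1, checking up to 10 times with 5 usecs
 * between polls:
 *
 *	u32 val;
 *	int rc;
 *
 *	rc = t4_wait_op_done_val(adap, A_EXAMPLE_REG, F_EXAMPLE_DONE, 1,
 *	    10, 5, &val);
 *
 * A 0 return means the bit was observed at the requested polarity and
 * val holds the register value at that moment; -EAGAIN means it never
 * got there within the 10 attempts.
 */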

/*
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void
t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);	/* flush */
}
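
/*
 * For illustration (hypothetical register name): to change only bits 7:4
 * of a control register to the value 0x3 while leaving all other bits
 * untouched, a caller passes the field mask and the pre-shifted value:
 *
 *	t4_set_reg_field(adap, A_EXAMPLE_CTRL, 0xf0U, 0x3U << 4);
 *
 * Note that @val must already be positioned within @mask; this routine
 * performs the read-modify-write but does not shift the value.
 */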

/*
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
void
t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
    unsigned int data_reg, u32 *vals, unsigned int nregs,
    unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/*
 *	t4_write_indirect - write indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect addresses
 *	@data_reg: register holding the value for the indirect registers
 *	@vals: values to write
 *	@nregs: how many indirect registers to write
 *	@start_idx: address of first indirect register to write
 *
 *	Writes a sequential block of registers that are accessed indirectly
 *	through an address/data register pair.
 */
void
t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
    unsigned int data_reg, const u32 *vals, unsigned int nregs,
    unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
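
/*
 * Example of the indirect-access pattern (the address/data register
 * names are placeholders): read four consecutive indirectly addressed
 * registers starting at index 8, then write them back:
 *
 *	u32 regs[4];
 *
 *	t4_read_indirect(adap, A_EXAMPLE_ADDR, A_EXAMPLE_DATA, regs, 4, 8);
 *	t4_write_indirect(adap, A_EXAMPLE_ADDR, A_EXAMPLE_DATA, regs, 4, 8);
 */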

/*
 *	t4_report_fw_error - report firmware error
 *	@adap: the adapter
 *
 *	The adapter firmware can indicate error conditions to the host.
 *	This routine prints out the reason for the firmware error (as
 *	reported by the firmware).
 */
static void
t4_report_fw_error(struct adapter *adap)
{
	static const char *reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR)
		CH_ERR(adap, "Firmware reports adapter error: %s\n",
		    reason[G_PCIE_FW_EVAL(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void
get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, u32 mbox_addr)
{
	for (/* */; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void
fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof (asrt) / 8, mbox_addr);
	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
	    asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
	    ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

#define	X_CIM_PF_NOACCESS 0xeeeeeeee
/*
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  Some FW commands like RESET and
 *	INITIALIZE can take a considerable amount of time to execute.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff, otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int
t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
    void *rpl, bool sleep_ok)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays, up to a maximum retry delay.
	 */
	static const int d[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;

	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return (-EINVAL);

	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));

	if (v != X_MBOWNER_PL)
		return (v ? -EBUSY : -ETIMEDOUT);

	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	(void) t4_read_reg(adap, ctl_reg);	/* flush write */

	delay_idx = 0;
	ms = d[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok != 0) {
			ms = d[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(d) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
				    V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl != NULL)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			return (-G_FW_CMD_RETVAL((int)res));
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	    *(const u8 *)cmd, mbox);
	if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
		t4_report_fw_error(adap);
	return (-ETIMEDOUT);
}
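
/*
 * A minimal sketch of issuing a FW command through this routine.  It
 * mirrors the pattern used by t4_link_start() and t4_restart_aneg()
 * below: build the command in host memory, byte-swap the header fields
 * to big-endian, and pass the same buffer and length for both the
 * command and its reply:
 *
 *	struct fw_port_cmd c;
 *	int ret;
 *
 *	(void) memset(&c, 0, sizeof (c));
 *	(fill in c.op_to_portid, c.action_to_len16, etc.)
 *	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof (c), &c, true);
 *
 * A 0 return means the FW accepted the command; a negative value is
 * either a local errno or the FW's (negated) error code.
 */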

/*
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which MC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int
t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	if (is_t4(adap->params.chip)) {
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
		    idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
		    idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return (-EBUSY);
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
	    F_START_BIST | V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i != 0)
		return (i);

#define	MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc != NULL)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return (0);
}

/*
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int
t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	if (is_t4(adap->params.chip)) {
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
		    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
		    idx);
	} else {
/*
 * These macros are missing from the t4_regs.h file.  They are added
 * here temporarily for testing.
 */
#define	EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define	EDC_REG_T5(reg, idx) ((reg) + EDC_STRIDE_T5 * (idx))
		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern =
		    EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN, idx);
		edc_bist_status_rdata_reg =
		    EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA, idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
	}

	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return (-EBUSY);
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg,
	    V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i != 0)
		return (i);

#define	EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc != NULL)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return (0);
}

/*
 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
 *	@adap: the adapter
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to read
 *	@buf: host memory buffer
 *
 *	Reads an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address, length and host buffer must be aligned on
 *	32-bit boundaries.  The memory is returned as a raw byte sequence
 *	from the firmware's memory.  If this memory contains data structures
 *	which contain multi-byte integers, it's the caller's responsibility
 *	to perform appropriate byte order conversions.
 */
int
t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len, __be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return (-EINVAL);

	/*
	 * The underlying EDC/MC read routines read 64 bytes at a time so we
	 * need to round down the start and round up the end.  We'll start
	 * copying out of the first line at (addr - start) a word at a time.
	 */
	start = addr & ~(64 - 1);
	end = (addr + len + 64 - 1) & ~(64 - 1);
	offset = (addr - start) / sizeof (__be32);

	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 data[16];

		/*
		 * Read the chip's memory block and bail if there's an error.
		 */
		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, data, NULL);
		if (ret != 0)
			return (ret);

		/*
		 * Copy the data into the caller's memory buffer.
		 */
		while (offset < 16 && len > 0) {
			*buf++ = data[offset++];
			len -= sizeof (__be32);
		}
	}

	return (0);
}
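
/*
 * Worked example of the rounding arithmetic above: a request for
 * addr = 0x104, len = 8 yields start = 0x100, end = 0x140 and
 * offset = (0x104 - 0x100) / 4 = 1, so the loop reads one 64-byte
 * line and copies data[1] and data[2] (8 bytes) to the caller.
 */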

/*
 *	t4_mem_win_rw - read/write memory through PCIE memory window
 *	@adap: the adapter
 *	@addr: address of first byte requested
 *	@data: MEMWIN0_APERTURE bytes of data containing the requested address
 *	@dir: direction of transfer 1 => read, 0 => write
 *
 *	Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
 *	MEMWIN0_APERTURE-byte-aligned address that covers the requested
 *	address @addr.
 */
static int
t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
{
	int i;

	/*
	 * Setup offset into PCIE memory window.  Address must be a
	 * MEMWIN0_APERTURE-byte-aligned address.  (Read back MA register to
	 * ensure that changes propagate before we attempt to use the new
	 * values.)
	 */
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0),
	    addr & ~(MEMWIN0_APERTURE - 1));
	(void) t4_read_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
	    0));

	/* Collect data 4 bytes at a time up to MEMWIN0_APERTURE */
	for (i = 0; i < MEMWIN0_APERTURE; i += 4) {
		if (dir != 0)
			*data++ = t4_read_reg(adap, (MEMWIN0_BASE + i));
		else
			t4_write_reg(adap, (MEMWIN0_BASE + i), *data++);
	}

	return (0);
}

int
t4_mem_win_read(struct adapter *adap, u32 addr, __be32 *data)
{
	return (t4_mem_win_rw(adap, addr, data, 1));
}
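
/*
 * Sketch of a hypothetical t4_mem_win_read() caller: read one full
 * window's worth of adapter memory at byte offset 0x10000 into a
 * suitably sized buffer.  The address must be MEMWIN0_APERTURE-byte
 * aligned:
 *
 *	__be32 buf[MEMWIN0_APERTURE / 4];
 *
 *	(void) t4_mem_win_read(adap, 0x10000, buf);
 */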

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define	EEPROM_MAX_RD_POLL	40
#define	EEPROM_MAX_WR_POLL	6
#define	EEPROM_STAT_ADDR	0x7bfc
#define	VPD_BASE		0x400
#define	VPD_BASE_OLD		0
#define	VPD_LEN			1024
#define	VPD_INFO_FLD_HDR_SIZE	3
#define	CHELSIO_VPD_UNIQUE_ID	0x82

/*
 *	t4_seeprom_read - read a serial EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's
 *	PCI VPD capability.  Note that this function must be called with a
 *	virtual address.
 */
int
t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_RD_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return (-EINVAL);

	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return (-EIO);
	}
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return (0);
}

/*
 *	t4_seeprom_write - write a serial EEPROM location
 *	@adapter: adapter to write
 *	@addr: virtual EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in serial EEPROM using the card's
 *	PCI VPD capability.  Note that this function must be called with a
 *	virtual address.
 */
int
t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_WR_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return (-EINVAL);

	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
	    cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
	    (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return (-EIO);
	}
	return (0);
}

/*
 *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [ES-A..ES)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
int
t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return (phys_addr + (31 << 10));
	if (phys_addr < 1024 + fn)
		return (EEPROMSIZE - fn + phys_addr - 1024);
	if (phys_addr < EEPROMSIZE)
		return (phys_addr - 1024 - fn);
	return (-EINVAL);
}
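
/*
 * Worked example of the mapping, using an illustrative fn = 1 and
 * sz = 0x800 (so A = 0x800): a physical address of 0x200 falls in the
 * first 1K and maps to 0x200 + 31K = 0x7e00, while a physical address
 * of 0x900 falls in [1K..1K+A) and maps to
 * ES - 0x800 + 0x900 - 0x400 = ES - 0x300.
 */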

/*
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int
t4_seeprom_wp(struct adapter *adapter, int enable)
{
	return (t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0));
}
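
/*
 * Typical usage pattern (a sketch): drop write protection around an
 * EEPROM update and restore it afterwards:
 *
 *	if ((ret = t4_seeprom_wp(adapter, 0)) == 0) {
 *		ret = t4_seeprom_write(adapter, addr, data);
 *		(void) t4_seeprom_wp(adapter, 1);
 *	}
 */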

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,		/* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,		/* program page */
	SF_WR_DISABLE   = 4,		/* disable writes */
	SF_RD_STATUS    = 5,		/* read status register */
	SF_WR_ENABLE    = 6,		/* enable writes */
	SF_RD_DATA_FAST = 0xb,		/* read flash */
	SF_RD_ID	= 0x9f,		/* read ID */
	SF_ERASE_SECTOR = 0xd8,		/* erase sector */
};

/*
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int
sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, int lock,
    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return (-EINVAL);
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return (-EBUSY);
	t4_write_reg(adapter, A_SF_OP,
	    V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return (ret);
}

/*
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int
sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, int lock,
    u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return (-EINVAL);
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return (-EBUSY);
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
	    V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return (t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5));
}
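
/*
 * The @cont flag chains SF operations into one atomic sequence and
 * @lock holds the SF arbiter for PL across them.  For illustration,
 * reading the 3-byte JEDEC ID with the SF_RD_ID opcode defined above
 * could look like this (error handling omitted):
 *
 *	u32 id;
 *
 *	(void) sf1_write(adapter, 1, 1, 1, SF_RD_ID);
 *	(void) sf1_read(adapter, 3, 0, 1, &id);
 *
 * followed by a write of 0 to A_SF_OP to release the lock, as
 * t4_read_flash() below does.
 */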

/*
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@d: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int
flash_wait_op(struct adapter *adapter, int attempts, int d)
{
	int ret = 0;
	u32 status;

	/* LINTED: E_CONSTANT_CONDITION */
	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			goto done;
		if (!(status & 1))
			goto done;
		if (--attempts == 0) {
			ret = -EAGAIN;
			goto done;
		}
		if (d != 0)
			msleep(d);
	}

done:
	return (ret);
}

/*
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int
t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords,
    u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof (u32) > adapter->params.sf_size ||
	    (addr & 3))
		return (-EINVAL);

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return (ret);

	for (/* */; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
		if (ret != 0)
			return (ret);
		if (byte_oriented != 0)
			*data = htonl(*data);
	}
	return (0);
}

/*
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	If @byte_oriented is set the write data is stored as a byte stream
 *	(i.e., it matches what is on disk), otherwise in big-endian.
 */
static int
t4_write_flash(struct adapter *adapter, unsigned int addr, unsigned int n,
    const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return (-EINVAL);

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = htonl(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret != 0)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret != 0)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
	    byte_oriented);
	if (ret != 0)
		return (ret);

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter, "failed to correctly write the flash page "
		    "at %#x\n", addr);
		return (-EIO);
	}
	return (0);

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return (ret);
}
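
/*
 * A note on t4_write_flash()'s page-boundary check: with SF_PAGE_SIZE
 * of 256, a write of n = 32 bytes at addr = 0x1f0 gives offset = 0xf0
 * and offset + n = 0x110 > 256; the write would wrap within the page,
 * so it is rejected with -EINVAL.  Callers such as t4_load_fw() and
 * t4_load_cfg() below split their writes at 256-byte boundaries.
 */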

/*
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int
t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return (t4_read_flash(adapter,
	    FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1, vers, 0));
}

/*
 *	t4_get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
int
t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return (t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
	    tp_microcode_ver), 1, vers, 0));
}

/*
 *	t4_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if there's an exact match, a negative error if the version could not
 *	be read or there's a major version mismatch, and a positive value if
 *	the expected major version is found but there's a minor version
 *	mismatch.
 */
int
t4_check_fw_version(struct adapter *adapter)
{
	int ret, major, minor, micro;
	int exp_major, exp_minor, exp_micro;

	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);

	if (ret != 0)
		return (ret);

	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);

	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		exp_major = T4FW_VERSION_MAJOR;
		exp_minor = T4FW_VERSION_MINOR;
		exp_micro = T4FW_VERSION_MICRO;
		break;
	case CHELSIO_T5:
		exp_major = T5FW_VERSION_MAJOR;
		exp_minor = T5FW_VERSION_MINOR;
		exp_micro = T5FW_VERSION_MICRO;
		break;
	default:
		CH_ERR(adapter, "Unsupported chip type, %x\n",
		    adapter->params.chip);
		return (-EINVAL);
	}

	if (major != exp_major) {            /* major mismatch - fail */
		CH_ERR(adapter, "card FW has major version %u, driver wants "
		    "%u\n", major, exp_major);
		return (-EINVAL);
	}

	if (minor == exp_minor && micro == exp_micro)
		return (0);			/* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return (1);
}

/*
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int
t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
		    SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter, "erase of flash sector %d failed, "
			    "error %d\n", start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return (ret);
}

/*
 *	t4_flash_cfg_addr - return the address of the flash configuration file
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the Firmware Configuration
 *	File is stored, or an error if the device FLASH is too small to contain
 *	a Firmware Configuration File.
 */
int
t4_flash_cfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
		return (-ENOSPC);

	return (FLASH_CFG_START);
}

/*
 *	t4_load_cfg - download config file
 *	@adap: the adapter
 *	@cfg_data: the cfg text file to write
 *	@size: text file size
 *
 *	Write the supplied config text file to the card's serial flash.
 */
int
t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_cfg_addr(adap);
	if (cfg_addr < 0)
		return (cfg_addr);

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		    FLASH_CFG_MAX_SIZE);
		return (-EFBIG);
	}

	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
	    sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
	    flash_cfg_start_sec + i - 1);
	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
		if (ret != 0)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret != 0)
		CH_ERR(adap, "config file %s failed %d\n",
		    (size == 0 ? "clear" : "download"), ret);
	return (ret);
}

/*
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int
t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	/* LINTED: E_BAD_PTR_CAST_ALIGN */
	const u32 *p = (const u32 *)fw_data;
	/* LINTED: E_BAD_PTR_CAST_ALIGN */
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}

	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return (-EINVAL);
	}
	if (size & 511) {
		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
		return (-EINVAL);
	}
	if (ntohs(hdr->len512) * 512 != size) {
		CH_ERR(adap, "FW image size differs from size in FW header\n");
		return (-EINVAL);
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n", fw_size);
		return (-EFBIG);
	}
	if ((is_t4(adap->params.chip) && hdr->chip != FW_HDR_CHIP_T4) ||
	    (is_t5(adap->params.chip) && hdr->chip != FW_HDR_CHIP_T5)) {
		CH_ERR(adap,
		    "FW image (%d) is not suitable for this adapter (%d)\n",
		    hdr->chip, adap->params.chip);
		return (-EINVAL);
	}

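	/*
	 * The FW image carries a checksum word chosen so that all of the
	 * image's 32-bit big-endian words sum to 0xffffffff modulo 2^32;
	 * verify that invariant before touching the flash.
	 */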
	for (csum = 0, i = 0; i < size / sizeof (csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap, "corrupted firmware image, checksum %x\n",
		    csum);
		return (-EINVAL);
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec,
	    fw_start_sec + i - 1);
	if (ret != 0)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	(void) memcpy(first_page, fw_data, SF_PAGE_SIZE);
	/* LINTED: E_BAD_PTR_CAST_ALIGN */
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret != 0)
		goto out;

	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret != 0)
			goto out;
	}

	ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
	    sizeof (hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret != 0)
		CH_ERR(adap, "firmware download failed, error %d\n", ret);
	return (ret);
}

/*
 *	t4_read_cimq_cfg - read CIM queue configuration
 *	@adap: the adapter
 *	@base: holds the queue base addresses in bytes
 *	@size: holds the queue sizes in bytes
 *	@thres: holds the queue full thresholds in bytes
 *
 *	Returns the current configuration of the CIM queues, starting with
 *	the IBQs, then the OBQs.
 */
void
t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;
	int cim_num_obq = is_t4(adap->params.chip) ?
	    CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
		    V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
		*thres++ = G_QUEFULLTHRSH(v) * 8;   /* 8-byte unit */
	}
	for (i = 0; i < cim_num_obq; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		    V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
	}
}

/*
 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int
t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	/* The queue index must identify one of the CIM inbound queues. */
	if (qid >= CIM_NUM_IBQ || (n & 3))
		return (-EINVAL);

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
		    F_IBQDBGEN);
		/*
		 * It might take 3-10 ms before the IBQ debug read access is
		 * allowed.  Wait for up to 1 second with a poll delay of
		 * 1 usec.
		 */
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
		    1000000, 1);
		if (err != 0)
			return (err);
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return (i);
}

/*
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int
t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;

	if (qid > 5 || (n & 3))
		return (-EINVAL);

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
	    V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
		    F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
		    2, 1);
		if (err != 0)
			return (err);
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return (i);
}

enum {
	CIM_QCTL_BASE	  = 0,
	CIM_CTL_BASE	  = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};

/*
 *	t4_cim_read - read a block from CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM internal address space.
 */
int
t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
    unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return (-EBUSY);

	for (/* */; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
		    0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return (ret);
}

/*
 *	t4_cim_write - write a block into CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to write
 *	@valp: set of values to write
 *
 *	Writes a block of 4-byte words into the CIM internal address space.
 */
int
t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
    const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return (-EBUSY);

	for (/* */; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
		    0, 5, 2);
	}
	return (ret);
}

static int
t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
{
	return (t4_cim_write(adap, addr, 1, &val));
}

/*
 *	t4_cim_ctl_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int
t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
    unsigned int *valp)
{
	return (t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp));
}

/*
 *	t4_cim_read_la - read CIM LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the CIM LA buffer with the most recent entry at
 *	the end of the returned data and with the entry at @wrptr first.
 *	We try to leave the LA in the running state we find it in.
 */
int
t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret != 0)
		return (ret);

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret != 0)
			return (ret);
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret != 0)
		goto restart;

	idx = G_UPDBGLAWRPTR(val);
	if (wrptr != NULL)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
		    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret != 0)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret != 0)
			break;
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret != 0)
			break;
		idx = (idx + 1) & M_UPDBGLARDPTR;
	}
restart:
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
		    cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return (ret);
}

void
t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
    unsigned int *pif_req_wrptr, unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr != NULL)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr != NULL)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, A_CIM_DEBUGCFG,
			    V_POLADBGRDPTR(req) | V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}
1465 
1466 void
1467 t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
1468 {
1469 	u32 cfg;
1470 	int i, j, idx;
1471 
1472 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1473 	if (cfg & F_LADBGEN)
1474 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1475 
1476 	for (i = 0; i < CIM_MALA_SIZE; i++) {
1477 		for (j = 0; j < 5; j++) {
1478 			idx = 8 * i + j;
1479 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
1480 			    V_PILADBGRDPTR(idx));
1481 			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
1482 			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
1483 		}
1484 	}
1485 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1486 }
1487 
1488 /*
1489  *	t4_tp_read_la - read TP LA capture buffer
1490  *	@adap: the adapter
1491  *	@la_buf: where to store the LA data
1492  *	@wrptr: the HW write pointer within the capture buffer
1493  *
1494  *	Reads the contents of the TP LA buffer with the most recent entry at
1495  *	the end	of the returned data and with the entry at @wrptr first.
1496  *	We leave the LA in the running state we find it in.
1497  */
1498 void
1499 t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1500 {
1501 	bool last_incomplete;
1502 	unsigned int i, cfg, val, idx;
1503 
1504 	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1505 	if (cfg & F_DBGLAENABLE)		/* freeze LA */
1506 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1507 		    adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
1508 
1509 	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1510 	idx = G_DBGLAWPTR(val);
1511 	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1512 	if (last_incomplete != 0)
1513 		idx = (idx + 1) & M_DBGLARPTR;
1514 	if (wrptr != NULL)
1515 		*wrptr = idx;
1516 
1517 	val &= 0xffff;
1518 	val &= ~V_DBGLARPTR(M_DBGLARPTR);
1519 	val |= adap->params.tp.la_mask;
1520 
1521 	for (i = 0; i < TPLA_SIZE; i++) {
1522 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1523 		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1524 		idx = (idx + 1) & M_DBGLARPTR;
1525 	}
1526 
1527 	/* Wipe out last entry if it isn't valid */
1528 	if (last_incomplete != 0)
1529 		la_buf[TPLA_SIZE - 1] = ~0ULL;
1530 
1531 	if (cfg & F_DBGLAENABLE)		/* restore running state */
1532 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1533 		    cfg | adap->params.tp.la_mask);
1534 }
1535 
1536 void
1537 t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1538 {
1539 	unsigned int i, j;
1540 
1541 	for (i = 0; i < 8; i++) {
1542 		u32 *p = la_buf + i;
1543 
1544 		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1545 		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1546 		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1547 		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1548 			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
1549 	}
1550 }
1551 
1552 #define	ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1553 		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1554 		     FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
1555 /*
1556  *	t4_link_start - apply link configuration to MAC/PHY
1557  *	@phy: the PHY to setup
1558  *	@mac: the MAC to setup
1559  *	@lc: the requested link configuration
1560  *
1561  *	Set up a port's MAC and PHY according to a desired link configuration.
1562  *	- If the PHY can auto-negotiate first decide what to advertise, then
1563  *	  enable/disable auto-negotiation as desired, and reset.
1564  *	- If the PHY does not auto-negotiate just reset it.
1565  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1566  *	  otherwise do it later based on the outcome of auto-negotiation.
1567  */
1568 int
1569 t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1570     struct link_config *lc)
1571 {
1572 	struct fw_port_cmd c;
1573 	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1574 
1575 	lc->link_ok = 0;
1576 	if (lc->requested_fc & PAUSE_RX)
1577 		fc |= FW_PORT_CAP_FC_RX;
1578 	if (lc->requested_fc & PAUSE_TX)
1579 		fc |= FW_PORT_CAP_FC_TX;
1580 
1581 	(void) memset(&c, 0, sizeof (c));
1582 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1583 	    F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1584 	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1585 	    FW_LEN16(c));
1586 
1587 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1588 		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1589 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1590 	} else if (lc->autoneg == AUTONEG_DISABLE) {
1591 		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1592 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1593 	} else
1594 		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1595 
1596 	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
1597 }
1598 
1599 /*
1600  *	t4_restart_aneg - restart autonegotiation
1601  *	@adap: the adapter
1602  *	@mbox: mbox to use for the FW command
1603  *	@port: the port id
1604  *
1605  *	Restarts autonegotiation for the selected port.
1606  */
1607 int
1608 t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1609 {
1610 	struct fw_port_cmd c;
1611 
1612 	(void) memset(&c, 0, sizeof (c));
1613 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1614 	    F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1615 	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1616 	    FW_LEN16(c));
1617 	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1618 	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
1619 }
1620 
1621 struct intr_info {
1622 	unsigned int mask;	/* bits to check in interrupt status */
1623 	const char *msg;	/* message to print or NULL */
1624 	short stat_idx;		/* stat counter to increment or -1 */
1625 	unsigned short fatal;	/* whether the condition reported is fatal */
1626 };
1627 
1628 /*
1629  *	t4_handle_intr_status - table driven interrupt handler
1630  *	@adapter: the adapter that generated the interrupt
1631  *	@reg: the interrupt status register to process
1632  *	@acts: table of interrupt actions
1633  *
1634  *	A table driven interrupt handler that applies a set of masks to an
1635  *	interrupt status word and performs the corresponding actions if the
1636  *	interrupts described by the mask have occured.  The actions include
1637  *	optionally emitting a warning or alert message.  The table is terminated
1638  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
1639  *	conditions.
1640  */
1641 static int
1642 t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1643     const struct intr_info *acts)
1644 {
1645 	int fatal = 0;
1646 	unsigned int mask = 0;
1647 	unsigned int status = t4_read_reg(adapter, reg);
1648 
1649 	for (/* */; acts->mask; ++acts) {
1650 		if (!(status & acts->mask))
1651 			continue;
1652 		if (acts->fatal != 0) {
1653 			fatal++;
1654 			CH_ALERT(adapter, "%s (0x%x)\n",
1655 			    acts->msg, status & acts->mask);
1656 		} else if (acts->msg != NULL)
1657 			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
1658 			    acts->msg, status & acts->mask);
1659 		mask |= acts->mask;
1660 	}
1661 	status &= mask;
1662 	if (status != 0)		/* clear processed interrupts */
1663 		t4_write_reg(adapter, reg, status);
1664 	return (fatal);
1665 }
1666 
1667 /*
1668  * Interrupt handler for the PCIE module.
1669  */
1670 static void
1671 pcie_intr_handler(struct adapter *adapter)
1672 {
1673 	static struct intr_info sysbus_intr_info[] = {
1674 		{ F_RNPP, "RXNP array parity error", -1, 1 },
1675 		{ F_RPCP, "RXPC array parity error", -1, 1 },
1676 		{ F_RCIP, "RXCIF array parity error", -1, 1 },
1677 		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
1678 		{ F_RFTP, "RXFT array parity error", -1, 1 },
1679 		{ 0 }
1680 	};
1681 	static struct intr_info pcie_port_intr_info[] = {
1682 		{ F_TPCP, "TXPC array parity error", -1, 1 },
1683 		{ F_TNPP, "TXNP array parity error", -1, 1 },
1684 		{ F_TFTP, "TXFT array parity error", -1, 1 },
1685 		{ F_TCAP, "TXCA array parity error", -1, 1 },
1686 		{ F_TCIP, "TXCIF array parity error", -1, 1 },
1687 		{ F_RCAP, "RXCA array parity error", -1, 1 },
1688 		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
1689 		{ F_RDPE, "Rx data parity error", -1, 1 },
1690 		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
1691 		{ 0 }
1692 	};
1693 	static struct intr_info pcie_intr_info[] = {
1694 		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1695 		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1696 		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
1697 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1698 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1699 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1700 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1701 		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1702 		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ F_TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
1704 		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1705 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1706 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1707 		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1708 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1709 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1710 		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1711 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1712 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1713 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1714 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
1715 		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1716 		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
1717 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1718 		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1719 		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
1720 		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
1721 		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
1722 		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
1723 		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
1724 		    0 },
1725 		{ 0 }
1726 	};
1727 
1728 	static struct intr_info t5_pcie_intr_info[] = {
1729 		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
1730 		    -1, 1 },
1731 		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
1732 		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
1733 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1734 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1735 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1736 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1737 		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
1738 		    -1, 1 },
1739 		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
1740 		    -1, 1 },
		{ F_TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
1742 		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
1743 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1744 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1745 		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
1746 		    -1, 1 },
1747 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1748 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HREQWRPERR, "PCI HMA channel write request parity error",
		    -1, 1 },
1750 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1751 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1752 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1753 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_VFIDPERR, "PCI VFID parity error", -1, 1 },
1755 		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
1756 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1757 		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
1758 		    -1, 1 },
1759 		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
1760 		    -1, 1 },
1761 		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
1762 		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
1763 		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
1764 		{ F_READRSPERR, "Outbound read error", -1,
1765 		    0 },
1766 		{ 0 }
1767 	};
1768 
1769 	int fat;
1770 
1771 	fat = t4_handle_intr_status(adapter,
1772 	    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, sysbus_intr_info) +
1773 	    t4_handle_intr_status(adapter,
1774 	    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, pcie_port_intr_info) +
1775 	    t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
1776 	    is_t4(adapter->params.chip) ?
1777 	    pcie_intr_info : t5_pcie_intr_info);
1778 	if (fat != 0)
1779 		t4_fatal_err(adapter);
1780 }
1781 
1782 /*
1783  * TP interrupt handler.
1784  */
1785 static void
1786 tp_intr_handler(struct adapter *adapter)
1787 {
1788 	static struct intr_info tp_intr_info[] = {
1789 		{ 0x3fffffff, "TP parity error", -1, 1 },
1790 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1791 		{ 0 }
1792 	};
1793 
1794 	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info) != 0)
1795 		t4_fatal_err(adapter);
1796 }
1797 
1798 /*
1799  * SGE interrupt handler.
1800  */
1801 static void
1802 sge_intr_handler(struct adapter *adapter)
1803 {
1804 	u64 v;
1805 	u32 err;
1806 
1807 	static struct intr_info sge_intr_info[] = {
1808 		{ F_ERR_CPL_EXCEED_IQE_SIZE,
1809 		    "SGE received CPL exceeding IQE size", -1, 1 },
1810 		{ F_ERR_INVALID_CIDX_INC,
1811 		    "SGE GTS CIDX increment too large", -1, 0 },
1812 		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1813 		{ F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1814 		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
1815 		    "SGE IQID > 1023 received CPL for FL", -1, 0 },
1816 		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1817 		    0 },
1818 		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1819 		    0 },
1820 		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1821 		    0 },
1822 		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1823 		    0 },
1824 		{ F_ERR_ING_CTXT_PRIO,
1825 		    "SGE too many priority ingress contexts", -1, 0 },
1826 		{ F_ERR_EGR_CTXT_PRIO,
1827 		    "SGE too many priority egress contexts", -1, 0 },
1828 		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1829 		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1830 		{ 0 }
1831 	};
1832 
1833 	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
1834 	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
1835 	if (v != 0) {
1836 		CH_ALERT(adapter, "SGE parity error (%llx)\n",
1837 		    (unsigned long long)v);
1838 		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
1839 		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
1840 	}
1841 
1842 	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
1843 
1844 	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
1845 	if (err & F_ERROR_QID_VALID) {
1846 		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
1847 		if (err & F_UNCAPTURED_ERROR)
1848 			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
1849 		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
1850 		    F_UNCAPTURED_ERROR);
1851 	}
1852 
1853 	if (v != 0)
1854 		t4_fatal_err(adapter);
1855 }
1856 
1857 #define	CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
1858 	F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
1859 #define	CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
1860 	F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
1861 
1862 /*
1863  * CIM interrupt handler.
1864  */
1865 static void
1866 cim_intr_handler(struct adapter *adapter)
1867 {
1868 	static struct intr_info cim_intr_info[] = {
1869 		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1870 		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
1871 		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
1872 		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1873 		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1874 		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1875 		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1876 		{ 0 }
1877 	};
1878 	static struct intr_info cim_upintr_info[] = {
1879 		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1880 		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1881 		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
1882 		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
1883 		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1884 		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1885 		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1886 		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1887 		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1888 		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1889 		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1890 		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1891 		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1892 		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1893 		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1894 		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1895 		{ F_SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
1896 		{ F_SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
1897 		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1898 		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1899 		{ F_SGLRDPLINT, "CIM single read from PL space", -1, 1 },
1900 		{ F_SGLWRPLINT, "CIM single write to PL space", -1, 1 },
1901 		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1902 		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1903 		{ F_REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
1904 		{ F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
1905 		{ F_TIMEOUTINT, "CIM PIF timeout", -1, 1 },
1906 		{ F_TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
1907 		{ 0 }
1908 	};
1909 
1910 	int fat;
1911 
1912 	if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
1913 		t4_report_fw_error(adapter);
1914 
1915 	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
1916 	    cim_intr_info) +
1917 	    t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
1918 	    cim_upintr_info);
1919 	if (fat != 0)
1920 		t4_fatal_err(adapter);
1921 }
1922 
1923 /*
1924  * ULP RX interrupt handler.
1925  */
1926 static void
1927 ulprx_intr_handler(struct adapter *adapter)
1928 {
1929 	static struct intr_info ulprx_intr_info[] = {
1930 		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
1931 		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
1932 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
1933 		{ 0 }
1934 	};
1935 
1936 	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info)
1937 	    != 0)
1938 		t4_fatal_err(adapter);
1939 }
1940 
1941 /*
1942  * ULP TX interrupt handler.
1943  */
1944 static void
1945 ulptx_intr_handler(struct adapter *adapter)
1946 {
1947 	static struct intr_info ulptx_intr_info[] = {
1948 		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1949 		    0 },
1950 		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1951 		    0 },
1952 		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1953 		    0 },
1954 		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1955 		    0 },
1956 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
1957 		{ 0 }
1958 	};
1959 
1960 	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info)
1961 	    != 0)
1962 		t4_fatal_err(adapter);
1963 }
1964 
1965 /*
1966  * PM TX interrupt handler.
1967  */
1968 static void
1969 pmtx_intr_handler(struct adapter *adapter)
1970 {
1971 	static struct intr_info pmtx_intr_info[] = {
1972 		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1973 		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1974 		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1975 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1976 		{ 0xffffff0, "PMTX framing error", -1, 1 },
1977 		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1978 		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
1979 		    1 },
1980 		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1981 		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1982 		{ 0 }
1983 	};
1984 
1985 	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
1986 		t4_fatal_err(adapter);
1987 }
1988 
1989 /*
1990  * PM RX interrupt handler.
1991  */
1992 static void
1993 pmrx_intr_handler(struct adapter *adapter)
1994 {
1995 	static struct intr_info pmrx_intr_info[] = {
1996 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1997 		{ 0x3ffff0, "PMRX framing error", -1, 1 },
1998 		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1999 		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
2000 		    1 },
2001 		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
2002 		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
2003 		{ 0 }
2004 	};
2005 
2006 	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
2007 		t4_fatal_err(adapter);
2008 }
2009 
2010 /*
2011  * CPL switch interrupt handler.
2012  */
2013 static void
2014 cplsw_intr_handler(struct adapter *adapter)
2015 {
2016 	static struct intr_info cplsw_intr_info[] = {
2017 		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
2018 		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
2019 		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
2020 		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
2021 		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
2022 		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
2023 		{ 0 }
2024 	};
2025 
2026 	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
2027 		t4_fatal_err(adapter);
2028 }
2029 
2030 /*
2031  * LE interrupt handler.
2032  */
2033 static void
2034 le_intr_handler(struct adapter *adap)
2035 {
2036 	static struct intr_info le_intr_info[] = {
2037 		{ F_LIPMISS, "LE LIP miss", -1, 0 },
2038 		{ F_LIP0, "LE 0 LIP error", -1, 0 },
2039 		{ F_PARITYERR, "LE parity error", -1, 1 },
2040 		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
2041 		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
2042 		{ 0 }
2043 	};
2044 
2045 	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
2046 		t4_fatal_err(adap);
2047 }
2048 
2049 /*
2050  * MPS interrupt handler.
2051  */
2052 static void
2053 mps_intr_handler(struct adapter *adapter)
2054 {
2055 	static struct intr_info mps_rx_intr_info[] = {
2056 		{ 0xffffff, "MPS Rx parity error", -1, 1 },
2057 		{ 0 }
2058 	};
2059 	static struct intr_info mps_tx_intr_info[] = {
2060 		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
2061 		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
2062 		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
2063 		    -1, 1 },
2064 		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
2065 		    -1, 1 },
2066 		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
2067 		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
2068 		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
2069 		{ 0 }
2070 	};
2071 	static struct intr_info mps_trc_intr_info[] = {
2072 		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
2073 		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
2074 		    1 },
2075 		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
2076 		{ 0 }
2077 	};
2078 	static struct intr_info mps_stat_sram_intr_info[] = {
2079 		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
2080 		{ 0 }
2081 	};
2082 	static struct intr_info mps_stat_tx_intr_info[] = {
2083 		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
2084 		{ 0 }
2085 	};
2086 	static struct intr_info mps_stat_rx_intr_info[] = {
2087 		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
2088 		{ 0 }
2089 	};
2090 	static struct intr_info mps_cls_intr_info[] = {
2091 		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
2092 		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
2093 		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
2094 		{ 0 }
2095 	};
2096 
2097 	int fat;
2098 
2099 	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
2100 	    mps_rx_intr_info) +
2101 	    t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
2102 	    mps_tx_intr_info) +
2103 	    t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
2104 	    mps_trc_intr_info) +
2105 	    t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
2106 	    mps_stat_sram_intr_info) +
2107 	    t4_handle_intr_status(adapter,
2108 	    A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
2109 	    mps_stat_tx_intr_info) +
2110 	    t4_handle_intr_status(adapter,
2111 	    A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
2112 	    mps_stat_rx_intr_info) +
2113 	    t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
2114 	    mps_cls_intr_info);
2115 
2116 	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
2117 	(void) t4_read_reg(adapter, A_MPS_INT_CAUSE);	/* flush */
2118 	if (fat != 0)
2119 		t4_fatal_err(adapter);
2120 }
2121 
2122 #define	MEM_INT_MASK \
2123 	(F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2124 
2125 /*
2126  * EDC/MC interrupt handler.
2127  */
2128 static void
2129 mem_intr_handler(struct adapter *adapter, int idx)
2130 {
2131 	static const char name[3][5] = { "EDC0", "EDC1", "MC" };
2132 
2133 	unsigned int addr, cnt_addr, v;
2134 
2135 	if (idx <= MEM_EDC1) {
2136 		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
2137 		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
2138 	} else {
2139 		if (is_t4(adapter->params.chip)) {
2140 			addr = A_MC_INT_CAUSE;
2141 			cnt_addr = A_MC_ECC_STATUS;
2142 		} else {
2143 			addr = A_MC_P_INT_CAUSE;
2144 			cnt_addr = A_MC_P_ECC_STATUS;
2145 		}
2146 	}
2147 
2148 	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
2149 	if (v & F_PERR_INT_CAUSE)
2150 		CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
2151 	if (v & F_ECC_CE_INT_CAUSE) {
2152 		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
2153 
2154 		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
2155 		CH_WARN_RATELIMIT(adapter,
2156 		    "%u %s correctable ECC data error%s\n", cnt, name[idx],
2157 		    cnt > 1 ? "s" : "");
2158 	}
2159 	if (v & F_ECC_UE_INT_CAUSE)
2160 		CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
2161 		    name[idx]);
2162 
2163 	t4_write_reg(adapter, addr, v);
2164 	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
2165 		t4_fatal_err(adapter);
2166 }
2167 
2168 /*
2169  * MA interrupt handler.
2170  */
2171 static void
2172 ma_intr_handler(struct adapter *adapter)
2173 {
2174 	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
2175 
2176 	if (status & F_MEM_PERR_INT_CAUSE)
2177 		CH_ALERT(adapter, "MA parity error, parity status %x\n",
2178 		    t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
2179 	if (status & F_MEM_WRAP_INT_CAUSE) {
2180 		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
2181 		CH_ALERT(adapter, "MA address wrap-around error by client %u to"
2182 		    " address %x\n", G_MEM_WRAP_CLIENT_NUM(v),
2183 		    G_MEM_WRAP_ADDRESS(v) << 4);
2184 	}
2185 	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
2186 	t4_fatal_err(adapter);
2187 }
2188 
2189 /*
2190  * SMB interrupt handler.
2191  */
2192 static void
2193 smb_intr_handler(struct adapter *adap)
2194 {
2195 	static struct intr_info smb_intr_info[] = {
2196 		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2197 		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2198 		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2199 		{ 0 }
2200 	};
2201 
2202 	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info) != 0)
2203 		t4_fatal_err(adap);
2204 }
2205 
2206 /*
2207  * NC-SI interrupt handler.
2208  */
2209 static void
2210 ncsi_intr_handler(struct adapter *adap)
2211 {
2212 	static struct intr_info ncsi_intr_info[] = {
2213 		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2214 		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2215 		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2216 		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2217 		{ 0 }
2218 	};
2219 
2220 	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info) != 0)
2221 		t4_fatal_err(adap);
2222 }
2223 
2224 /*
2225  * XGMAC interrupt handler.
2226  */
2227 static void
2228 xgmac_intr_handler(struct adapter *adap, int port)
2229 {
2230 	u32 v, int_cause_reg;
2231 
2232 	if (is_t4(adap->params.chip))
2233 		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
2234 	else
2235 		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
2236 
2237 	v = t4_read_reg(adap, int_cause_reg);
2238 	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
2239 
2240 	if (!v)
2241 		return;
2242 
2243 	if (v & F_TXFIFO_PRTY_ERR)
2244 		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2245 	if (v & F_RXFIFO_PRTY_ERR)
2246 		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
2247 	t4_write_reg(adap, int_cause_reg, v);
2248 	t4_fatal_err(adap);
2249 }
2250 
2251 /*
2252  * PL interrupt handler.
2253  */
2254 static void
2255 pl_intr_handler(struct adapter *adap)
2256 {
2257 	static struct intr_info pl_intr_info[] = {
2258 		{ F_FATALPERR, "Fatal parity error", -1, 1 },
2259 		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2260 		{ 0 }
2261 	};
2262 
2263 	static struct intr_info t5_pl_intr_info[] = {
2264 		{ F_PL_BUSPERR, "PL bus parity error", -1, 1 },
2265 		{ F_FATALPERR, "Fatal parity error", -1, 1 },
2266 		{ 0 }
2267 	};
2268 
2269 	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
2270 	    is_t4(adap->params.chip) ? pl_intr_info : t5_pl_intr_info))
2271 		t4_fatal_err(adap);
2272 }
2273 
2274 #define	PF_INTR_MASK (F_PFSW | F_PFCIM)
2275 #define	GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2276 		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2277 		F_CPL_SWITCH | F_SGE | F_ULP_TX)
2278 
2279 /*
2280  *	t4_slow_intr_handler - control path interrupt handler
2281  *	@adapter: the adapter
2282  *
2283  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
2284  *	The designation 'slow' is because it involves register reads, while
2285  *	data interrupts typically don't involve any MMIOs.
2286  */
2287 int
2288 t4_slow_intr_handler(struct adapter *adapter)
2289 {
2290 	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
2291 
2292 	if (!(cause & GLBL_INTR_MASK))
2293 		return (0);
2294 	if (cause & F_CIM)
2295 		cim_intr_handler(adapter);
2296 	if (cause & F_MPS)
2297 		mps_intr_handler(adapter);
2298 	if (cause & F_NCSI)
2299 		ncsi_intr_handler(adapter);
2300 	if (cause & F_PL)
2301 		pl_intr_handler(adapter);
2302 	if (cause & F_SMB)
2303 		smb_intr_handler(adapter);
2304 	if (cause & F_XGMAC0)
2305 		xgmac_intr_handler(adapter, 0);
2306 	if (cause & F_XGMAC1)
2307 		xgmac_intr_handler(adapter, 1);
2308 	if (cause & F_XGMAC_KR0)
2309 		xgmac_intr_handler(adapter, 2);
2310 	if (cause & F_XGMAC_KR1)
2311 		xgmac_intr_handler(adapter, 3);
2312 	if (cause & F_PCIE)
2313 		pcie_intr_handler(adapter);
2314 	if (cause & F_MC)
2315 		mem_intr_handler(adapter, MEM_MC);
2316 	if (cause & F_EDC0)
2317 		mem_intr_handler(adapter, MEM_EDC0);
2318 	if (cause & F_EDC1)
2319 		mem_intr_handler(adapter, MEM_EDC1);
2320 	if (cause & F_LE)
2321 		le_intr_handler(adapter);
2322 	if (cause & F_TP)
2323 		tp_intr_handler(adapter);
2324 	if (cause & F_MA)
2325 		ma_intr_handler(adapter);
2326 	if (cause & F_PM_TX)
2327 		pmtx_intr_handler(adapter);
2328 	if (cause & F_PM_RX)
2329 		pmrx_intr_handler(adapter);
2330 	if (cause & F_ULP_RX)
2331 		ulprx_intr_handler(adapter);
2332 	if (cause & F_CPL_SWITCH)
2333 		cplsw_intr_handler(adapter);
2334 	if (cause & F_SGE)
2335 		sge_intr_handler(adapter);
2336 	if (cause & F_ULP_TX)
2337 		ulptx_intr_handler(adapter);
2338 
2339 	/* Clear the interrupts just processed for which we are the master. */
2340 	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2341 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2342 	return (1);
2343 }
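
/*
 * Usage sketch (illustrative only): an OS-specific top-level ISR
 * would typically just call t4_slow_intr_handler() for the "slow"
 * (error) interrupt path and use the non-zero return value to claim
 * the interrupt.  All of the DDI/ISR plumbing is omitted here.
 */
#ifdef T4_EXAMPLE_CODE
static int
example_slow_isr(struct adapter *adapter)
{
	/* Returns 1 if any global error interrupt was serviced. */
	return (t4_slow_intr_handler(adapter));
}
#endif	/* T4_EXAMPLE_CODE */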
2344 
2345 /*
2346  *	t4_intr_enable - enable interrupts
2347  *	@adapter: the adapter whose interrupts should be enabled
2348  *
2349  *	Enable PF-specific interrupts for the calling function and the top-level
2350  *	interrupt concentrator for global interrupts.  Interrupts are already
2351  *	enabled at each module,	here we just enable the roots of the interrupt
2352  *	hierarchies.
2353  *
2354  *	Note: this function should be called only when the driver manages
2355  *	non PF-specific interrupts from the various HW modules.  Only one PCI
2356  *	function at a time should be doing this.
2357  */
2358 void
2359 t4_intr_enable(struct adapter *adapter)
2360 {
2361 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2362 
2363 	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2364 	    F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 | F_ERR_DROPPED_DB |
2365 	    F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0 |
2366 	    F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2367 	    F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_ERR_EGR_CTXT_PRIO |
2368 	    F_INGRESS_SIZE_ERR | F_EGRESS_SIZE_ERR);
2369 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2370 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2371 }
2372 
2373 /*
2374  *	t4_intr_disable - disable interrupts
2375  *	@adapter: the adapter whose interrupts should be disabled
2376  *
2377  *	Disable interrupts.  We only disable the top-level interrupt
2378  *	concentrators.  The caller must be a PCI function managing global
2379  *	interrupts.
2380  */
2381 void
2382 t4_intr_disable(struct adapter *adapter)
2383 {
2384 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2385 
2386 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2387 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2388 }
2389 
2390 /*
2391  *	t4_intr_clear - clear all interrupts
2392  *	@adapter: the adapter whose interrupts should be cleared
2393  *
2394  *	Clears all interrupts.  The caller must be a PCI function managing
2395  *	global interrupts.
2396  */
2397 void
2398 t4_intr_clear(struct adapter *adapter)
2399 {
2400 	static const unsigned int cause_reg[] = {
2401 		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2402 		A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2403 		A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2404 		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2405 		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
2406 		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2407 		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2408 		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2409 		A_TP_INT_CAUSE,
2410 		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2411 		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2412 		A_MPS_RX_PERR_INT_CAUSE,
2413 		A_CPL_INTR_CAUSE,
2414 		MYPF_REG(A_PL_PF_INT_CAUSE),
2415 		A_PL_PL_INT_CAUSE,
2416 		A_LE_DB_INT_CAUSE,
2417 	};
2418 
2419 	unsigned int i;
2420 
2421 	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2422 		t4_write_reg(adapter, cause_reg[i], 0xffffffff);
2425 	t4_write_reg(adapter, is_t4(adapter->params.chip) ? A_MC_INT_CAUSE :
2426 	    A_MC_P_INT_CAUSE, 0xffffffff);
2427 
2428 	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2429 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);	/* flush */
2430 }
2431 
2432 /*
2433  *	hash_mac_addr - return the hash value of a MAC address
2434  *	@addr: the 48-bit Ethernet MAC address
2435  *
2436  *	Hashes a MAC address according to the hash function used by HW inexact
2437  *	(hash) address matching.
2438  */
2439 static int
2440 hash_mac_addr(const u8 *addr)
2441 {
2442 	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2443 	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2444 	a ^= b;
2445 	a ^= (a >> 12);
2446 	a ^= (a >> 6);
2447 	return (a & 0x3f);
2448 }
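
/*
 * Worked example (hand-computed, for illustration): for the MAC
 * address 00:11:22:33:44:55,
 *
 *	a = 0x001122, b = 0x334455
 *	a ^= b		-> 0x335577
 *	a ^= (a >> 12)	-> 0x335642
 *	a ^= (a >> 6)	-> 0x339b1b
 *	a & 0x3f	-> 0x1b
 *
 * so the address falls into bucket 27 of the 64-entry hash.
 */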
2449 
2450 /*
2451  *	t4_config_rss_range - configure a portion of the RSS mapping table
2452  *	@adapter: the adapter
2453  *	@mbox: mbox to use for the FW command
2454  *	@viid: virtual interface whose RSS subtable is to be written
2455  *	@start: start entry in the table to write
2456  *	@n: how many table entries to write
2457  *	@rspq: values for the "response queue" (Ingress Queue) lookup table
2458  *	@nrspq: number of values in @rspq
2459  *
2460  *	Programs the selected part of the VI's RSS mapping table with the
2461  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
2462  *	until the full table range is populated.
2463  *
2464  *	The caller must ensure the values in @rspq are in the range allowed for
2465  *	@viid.
2466  */
2467 int
2468 t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2469     int start, int n, const u16 *rspq, unsigned int nrspq)
2470 {
2471 	int ret;
2472 	const u16 *rsp = rspq;
2473 	const u16 *rsp_end = rspq + nrspq;
2474 	struct fw_rss_ind_tbl_cmd cmd;
2475 
2476 	(void) memset(&cmd, 0, sizeof (cmd));
2477 	cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2478 	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2479 	    V_FW_RSS_IND_TBL_CMD_VIID(viid));
2480 	cmd.retval_len16 = htonl(FW_LEN16(cmd));
2481 
2482 	/*
2483 	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2484 	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
2485 	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2486 	 * reserved.
2487 	 */
2488 	while (n > 0) {
2489 		int nq = min(n, 32);
2490 		int nq_packed = 0;
2491 		__be32 *qp = &cmd.iq0_to_iq2;
2492 
2493 		/*
2494 		 * Set up the firmware RSS command header to send the next
2495 		 * "nq" Ingress Queue IDs to the firmware.
2496 		 */
2497 		cmd.niqid = htons(nq);
2498 		cmd.startidx = htons(start);
2499 
2500 		/*
		 * Advance past the "nq" Ingress Queue IDs we're about to
		 * send before the next loop iteration.
2502 		 */
2503 		start += nq;
2504 		n -= nq;
2505 
2506 		/*
2507 		 * While there are still Ingress Queue IDs to stuff into the
2508 		 * current firmware RSS command, retrieve them from the
2509 		 * Ingress Queue ID array and insert them into the command.
2510 		 */
2511 		while (nq > 0) {
2512 			/*
2513 			 * Grab up to the next 3 Ingress Queue IDs (wrapping
2514 			 * around the Ingress Queue ID array if necessary) and
2515 			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
2517 			 */
2518 			u16 qbuf[3];
2519 			u16 *qbp = qbuf;
2520 			int nqbuf = min(3, nq);
2521 
2522 			nq -= nqbuf;
2523 			qbuf[0] = qbuf[1] = qbuf[2] = 0;
2524 			while (nqbuf && nq_packed < 32) {
2525 				nqbuf--;
2526 				nq_packed++;
2527 				*qbp++ = *rsp++;
2528 				if (rsp >= rsp_end)
2529 					rsp = rspq;
2530 			}
2531 			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2532 			    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2533 			    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2534 		}
2535 
2536 		/*
		 * Send this portion of the RSS table update to the firmware;
2538 		 * bail out on any errors.
2539 		 */
2540 		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof (cmd), NULL);
2541 		if (ret != 0)
2542 			return (ret);
2543 	}
2544 
2545 	return (0);
2546 }
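
/*
 * Usage sketch (illustrative only): spread a 128-entry RSS subtable
 * across four ingress queues.  Because nrspq (4) is smaller than n
 * (128), the four IDs below are used round-robin until all 128 slots
 * are written.  The queue IDs are made-up values; real IDs come from
 * the VI's ingress queue allocation.
 */
#ifdef T4_EXAMPLE_CODE
static int
example_config_rss(struct adapter *adapter, int mbox, unsigned int viid)
{
	static const u16 rspq[] = { 64, 65, 66, 67 };

	return (t4_config_rss_range(adapter, mbox, viid, 0, 128,
	    rspq, ARRAY_SIZE(rspq)));
}
#endif	/* T4_EXAMPLE_CODE */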
2547 
2548 /*
2549  *	t4_config_glbl_rss - configure the global RSS mode
2550  *	@adapter: the adapter
2551  *	@mbox: mbox to use for the FW command
2552  *	@mode: global RSS mode
2553  *	@flags: mode-specific flags
2554  *
2555  *	Sets the global RSS mode.
2556  */
2557 int
2558 t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2559     unsigned int flags)
2560 {
2561 	struct fw_rss_glb_config_cmd c;
2562 
2563 	(void) memset(&c, 0, sizeof (c));
2564 	c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2565 	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2566 	c.retval_len16 = htonl(FW_LEN16(c));
2567 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2568 		c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2569 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2570 		c.u.basicvirtual.mode_pkd =
2571 		    htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2572 		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2573 	} else
2574 		return (-EINVAL);
2575 	return (t4_wr_mbox(adapter, mbox, &c, sizeof (c), NULL));
2576 }
2577 
2578 /*
2579  *	t4_config_vi_rss - configure per VI RSS settings
2580  *	@adapter: the adapter
2581  *	@mbox: mbox to use for the FW command
2582  *	@viid: the VI id
2583  *	@flags: RSS flags
2584  *	@defq: id of the default RSS queue for the VI.
2585  *
2586  *	Configures VI-specific RSS properties.
2587  */
2588 int
2589 t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2590     unsigned int flags, unsigned int defq)
2591 {
2592 	struct fw_rss_vi_config_cmd c;
2593 
2594 	(void) memset(&c, 0, sizeof (c));
2595 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2596 	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2597 	    V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2598 	c.retval_len16 = htonl(FW_LEN16(c));
2599 	c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2600 	    V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2601 	return (t4_wr_mbox(adapter, mbox, &c, sizeof (c), NULL));
2602 }
2603 
2604 /* Read an RSS table row */
2605 static int
2606 rd_rss_row(struct adapter *adap, int row, u32 *val)
2607 {
2608 	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2609 	return (t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2610 	    5, 0, val));
2611 }
2612 
2613 /*
2614  *	t4_read_rss - read the contents of the RSS mapping table
2615  *	@adapter: the adapter
2616  *	@map: holds the contents of the RSS mapping table
2617  *
2618  *	Reads the contents of the RSS hash->queue mapping table.
2619  */
2620 int
2621 t4_read_rss(struct adapter *adapter, u16 *map)
2622 {
2623 	u32 val;
2624 	int i, ret;
2625 
2626 	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2627 		ret = rd_rss_row(adapter, i, &val);
2628 		if (ret != 0)
2629 			return (ret);
2630 		*map++ = G_LKPTBLQUEUE0(val);
2631 		*map++ = G_LKPTBLQUEUE1(val);
2632 	}
2633 	return (0);
2634 }
2635 
2636 /*
2637  *	t4_read_rss_key - read the global RSS key
2638  *	@adap: the adapter
2639  *	@key: 10-entry array holding the 320-bit RSS key
2640  *
2641  *	Reads the global 320-bit RSS key.
2642  */
2643 void
2644 t4_read_rss_key(struct adapter *adap, u32 *key)
2645 {
2646 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2647 	    A_TP_RSS_SECRET_KEY0);
2648 }
2649 
2650 /*
2651  *	t4_write_rss_key - program one of the RSS keys
2652  *	@adap: the adapter
2653  *	@key: 10-entry array holding the 320-bit RSS key
2654  *	@idx: which RSS key to write
2655  *
2656  *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
2657  *	0..15 the corresponding entry in the RSS key table is written,
2658  *	otherwise the global RSS key is written.
2659  */
2660 void
2661 t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2662 {
2663 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2664 	    A_TP_RSS_SECRET_KEY0);
2665 	if (idx >= 0 && idx < 16)
2666 		t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2667 		    V_KEYWRADDR(idx) | F_KEYWREN);
2668 }
2669 
2670 /*
2671  *	t4_read_rss_pf_config - read PF RSS Configuration Table
2672  *	@adapter: the adapter
2673  *	@index: the entry in the PF RSS table to read
2674  *	@valp: where to store the returned value
2675  *
2676  *	Reads the PF RSS Configuration Table at the specified index and returns
2677  *	the value found there.
2678  */
2679 void
2680 t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2681 {
2682 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2683 	    valp, 1, A_TP_RSS_PF0_CONFIG + index);
2684 }
2685 
2686 /*
2687  *	t4_write_rss_pf_config - write PF RSS Configuration Table
2688  *	@adapter: the adapter
2689  *	@index: the entry in the VF RSS table to read
2690  *	@val: the value to store
2691  *
2692  *	Writes the PF RSS Configuration Table at the specified index with the
2693  *	specified value.
2694  */
2695 void
2696 t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2697 {
2698 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2699 	    &val, 1, A_TP_RSS_PF0_CONFIG + index);
2700 }
2701 
2702 /*
2703  *	t4_read_rss_vf_config - read VF RSS Configuration Table
2704  *	@adapter: the adapter
2705  *	@index: the entry in the VF RSS table to read
2706  *	@vfl: where to store the returned VFL
2707  *	@vfh: where to store the returned VFH
2708  *
2709  *	Reads the VF RSS Configuration Table at the specified index and returns
2710  *	the (VFL, VFH) values found there.
2711  */
2712 void
2713 t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, u32 *vfl,
2714     u32 *vfh)
2715 {
2716 	u32 vrt;
2717 
2718 	/*
2719 	 * Request that the index'th VF Table values be read into VFL/VFH.
2720 	 */
2721 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2722 	vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
2723 	vrt |= V_VFWRADDR(index) | F_VFRDEN;
2724 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2725 
2726 	/*
2727 	 * Grab the VFL/VFH values ...
2728 	 */
2729 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2730 	    vfl, 1, A_TP_RSS_VFL_CONFIG);
2731 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2732 	    vfh, 1, A_TP_RSS_VFH_CONFIG);
2733 }
2734 
2735 /*
2736  *	t4_write_rss_vf_config - write VF RSS Configuration Table
2738  *	@adapter: the adapter
2739  *	@index: the entry in the VF RSS table to write
2740  *	@vfl: the VFL to store
2741  *	@vfh: the VFH to store
2742  *
2743  *	Writes the VF RSS Configuration Table at the specified index with the
2744  *	specified (VFL, VFH) values.
2745  */
2746 void
2747 t4_write_rss_vf_config(struct adapter *adapter, unsigned int index, u32 vfl,
2748     u32 vfh)
2749 {
2750 	u32 vrt;
2751 
2752 	/*
2753 	 * Load up VFL/VFH with the values to be written ...
2754 	 */
2755 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2756 	    &vfl, 1, A_TP_RSS_VFL_CONFIG);
2757 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2758 	    &vfh, 1, A_TP_RSS_VFH_CONFIG);
2759 
2760 	/*
2761 	 * Write the VFL/VFH into the VF Table at index'th location.
2762 	 */
2763 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2764 	vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
2765 	vrt |= V_VFWRADDR(index) | F_VFWREN;
2766 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2767 }
2768 
2769 /*
2770  *	t4_read_rss_pf_map - read PF RSS Map
2771  *	@adapter: the adapter
2772  *
2773  *	Reads the PF RSS Map register and returns its value.
2774  */
2775 u32
2776 t4_read_rss_pf_map(struct adapter *adapter)
2777 {
2778 	u32 pfmap;
2779 
2780 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2781 	    &pfmap, 1, A_TP_RSS_PF_MAP);
2782 	return (pfmap);
2783 }
2784 
2785 /*
2786  *	t4_write_rss_pf_map - write PF RSS Map
2787  *	@adapter: the adapter
2788  *	@pfmap: PF RSS Map value
2789  *
2790  *	Writes the specified value to the PF RSS Map register.
2791  */
2792 void
2793 t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
2794 {
2795 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2796 	    &pfmap, 1, A_TP_RSS_PF_MAP);
2797 }
2798 
2799 /*
2800  *	t4_read_rss_pf_mask - read PF RSS Mask
2801  *	@adapter: the adapter
2802  *
2803  *	Reads the PF RSS Mask register and returns its value.
2804  */
2805 u32
2806 t4_read_rss_pf_mask(struct adapter *adapter)
2807 {
2808 	u32 pfmask;
2809 
2810 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2811 	    &pfmask, 1, A_TP_RSS_PF_MSK);
2812 	return (pfmask);
2813 }
2814 
2815 /*
2816  *	t4_write_rss_pf_mask - write PF RSS Mask
2817  *	@adapter: the adapter
2818  *	@pfmask: PF RSS Mask value
2819  *
2820  *	Writes the specified value to the PF RSS Mask register.
2821  */
2822 void
2823 t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
2824 {
2825 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2826 	    &pfmask, 1, A_TP_RSS_PF_MSK);
2827 }
2828 
2829 /*
2830  *	t4_set_filter_mode - configure the optional components of filter tuples
2831  *	@adap: the adapter
 *	@mode_map: a bitmap selecting which optional filter components to enable
2833  *
2834  *	Sets the filter mode by selecting the optional components to enable
2835  *	in filter tuples.  Returns 0 on success and a negative error if the
2836  *	requested mode needs more bits than are available for optional
2837  *	components.
2838  */
2839 int
2840 t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
2841 {
2842 	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
2843 
2844 	int i, nbits = 0;
2845 
2846 	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
2847 		if (mode_map & (1 << i))
2848 			nbits += width[i];
2849 	if (nbits > FILTER_OPT_LEN)
2850 		return (-EINVAL);
2851 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
2852 	    A_TP_VLAN_PRI_MAP);
2853 	return (0);
2854 }
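
/*
 * Worked example (illustrative): width[] above gives the bit cost of
 * each optional tuple field from S_FCOE through S_FRAGMENTATION.
 * Enabling, say, the 3-bit, 8-bit, 16-bit, and 9-bit fields costs
 * 3 + 8 + 16 + 9 = 36 bits, which exactly fills a 36-bit
 * FILTER_OPT_LEN budget (as on T4); enabling any further field would
 * then make this function return -EINVAL.
 */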
2855 
2856 /*
2857  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
2858  *	@adap: the adapter
2859  *	@v4: holds the TCP/IP counter values
2860  *	@v6: holds the TCP/IPv6 counter values
2861  *
2862  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2863  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2864  */
2865 void
2866 t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2867     struct tp_tcp_stats *v6)
2868 {
2869 	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
2870 
2871 #define	STAT_IDX(x)	((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
2872 #define	STAT(x)		val[STAT_IDX(x)]
2873 #define	STAT64(x)	(((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2874 
2875 	if (v4 != NULL) {
2876 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2877 		    ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
2878 		v4->tcpOutRsts = STAT(OUT_RST);
2879 		v4->tcpInSegs  = STAT64(IN_SEG);
2880 		v4->tcpOutSegs = STAT64(OUT_SEG);
2881 		v4->tcpRetransSegs = STAT64(RXT_SEG);
2882 	}
2883 	if (v6 != NULL) {
2884 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2885 		    ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
2886 		v6->tcpOutRsts = STAT(OUT_RST);
2887 		v6->tcpInSegs  = STAT64(IN_SEG);
2888 		v6->tcpOutSegs = STAT64(OUT_SEG);
2889 		v6->tcpRetransSegs = STAT64(RXT_SEG);
2890 	}
2891 #undef STAT64
2892 #undef STAT
2893 #undef STAT_IDX
2894 }
2895 
2896 /*
2897  *	t4_tp_get_err_stats - read TP's error MIB counters
2898  *	@adap: the adapter
2899  *	@st: holds the counter values
2900  *
2901  *	Returns the values of TP's error counters.
2902  */
2903 void
2904 t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
2905 {
2906 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
2907 	    12, A_TP_MIB_MAC_IN_ERR_0);
2908 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
2909 	    8, A_TP_MIB_TNL_CNG_DROP_0);
2910 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
2911 	    4, A_TP_MIB_TNL_DROP_0);
2912 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
2913 	    4, A_TP_MIB_OFD_VLN_DROP_0);
2914 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
2915 	    4, A_TP_MIB_TCP_V6IN_ERR_0);
2916 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
2917 	    2, A_TP_MIB_OFD_ARP_DROP);
2918 }
2919 
2920 /*
2921  *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
2922  *	@adap: the adapter
2923  *	@st: holds the counter values
2924  *
2925  *	Returns the values of TP's proxy counters.
2926  */
2927 void
2928 t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
2929 {
2930 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
2931 	    4, A_TP_MIB_TNL_LPBK_0);
2932 }
2933 
2934 /*
2935  *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
2936  *	@adap: the adapter
2937  *	@st: holds the counter values
2938  *
2939  *	Returns the values of TP's CPL counters.
2940  */
2941 void
2942 t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
2943 {
2944 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
2945 	    8, A_TP_MIB_CPL_IN_REQ_0);
2946 }
2947 
2948 /*
2949  *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
2950  *	@adap: the adapter
2951  *	@st: holds the counter values
2952  *
2953  *	Returns the values of TP's RDMA counters.
2954  */
2955 void
2956 t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
2957 {
2958 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
2959 	    2, A_TP_MIB_RQE_DFR_MOD);
2960 }
2961 
2962 /*
2963  *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
2964  *	@adap: the adapter
2965  *	@idx: the port index
2966  *	@st: holds the counter values
2967  *
2968  *	Returns the values of TP's FCoE counters for the selected port.
2969  */
2970 void
2971 t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
2972     struct tp_fcoe_stats *st)
2973 {
2974 	u32 val[2];
2975 
2976 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
2977 	    1, A_TP_MIB_FCOE_DDP_0 + idx);
2978 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
2979 	    1, A_TP_MIB_FCOE_DROP_0 + idx);
2980 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2981 	    2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
2982 	st->octetsDDP = ((u64)val[0] << 32) | val[1];
2983 }
2984 
2985 /*
2986  *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
2987  *	@adap: the adapter
2988  *	@st: holds the counter values
2989  *
2990  *	Returns the values of TP's counters for non-TCP directly-placed packets.
2991  */
2992 void
2993 t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
2994 {
2995 	u32 val[4];
2996 
2997 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
2998 	    A_TP_MIB_USM_PKTS);
2999 	st->frames = val[0];
3000 	st->drops = val[1];
3001 	st->octets = ((u64)val[2] << 32) | val[3];
3002 }
3003 
3004 /*
3005  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
3006  *	@adap: the adapter
3007  *	@mtus: where to store the MTU values
3008  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
3009  *
3010  *	Reads the HW path MTU table.
3011  */
3012 void
3013 t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
3014 {
3015 	u32 v;
3016 	int i;
3017 
3018 	for (i = 0; i < NMTUS; ++i) {
3019 		t4_write_reg(adap, A_TP_MTU_TABLE,
3020 		    V_MTUINDEX(0xffU) | V_MTUVALUE(i));
3021 		v = t4_read_reg(adap, A_TP_MTU_TABLE);
3022 		mtus[i] = G_MTUVALUE(v);
3023 		if (mtu_log != NULL)
3024 			mtu_log[i] = G_MTUWIDTH(v);
3025 	}
3026 }
3027 
3028 /*
3029  *	t4_read_cong_tbl - reads the congestion control table
3030  *	@adap: the adapter
3031  *	@incr: where to store the alpha values
3032  *
3033  *	Reads the additive increments programmed into the HW congestion
3034  *	control table.
3035  */
3036 void
3037 t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
3038 {
3039 	unsigned int mtu, w;
3040 
3041 	for (mtu = 0; mtu < NMTUS; ++mtu)
3042 		for (w = 0; w < NCCTRL_WIN; ++w) {
3043 			t4_write_reg(adap, A_TP_CCTRL_TABLE,
3044 			    V_ROWINDEX(0xffffU) | (mtu << 5) | w);
3045 			incr[mtu][w] = (u16)t4_read_reg(adap,
3046 			    A_TP_CCTRL_TABLE) & 0x1fff;
3047 		}
3048 }
3049 
3050 /*
3051  *	t4_read_pace_tbl - read the pace table
3052  *	@adap: the adapter
3053  *	@pace_vals: holds the returned values
3054  *
3055  *	Returns the values of TP's pace table in microseconds.
3056  */
3057 void
3058 t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
3059 {
3060 	unsigned int i, v;
3061 
3062 	for (i = 0; i < NTX_SCHED; i++) {
3063 		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
3064 		v = t4_read_reg(adap, A_TP_PACE_TABLE);
3065 		pace_vals[i] = dack_ticks_to_usec(adap, v);
3066 	}
3067 }
3068 
3069 /*
3070  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
3071  *	@adap: the adapter
3072  *	@addr: the indirect TP register address
3073  *	@mask: specifies the field within the register to modify
3074  *	@val: new value for the field
3075  *
3076  *	Sets a field of an indirect TP register to the given value.
3077  */
3078 void
3079 t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
3080     unsigned int mask, unsigned int val)
3081 {
3082 	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
3083 	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
3084 	t4_write_reg(adap, A_TP_PIO_DATA, val);
3085 }
3086 
3087 /*
3088  *	init_cong_ctrl - initialize congestion control parameters
3089  *	@a: the alpha values for congestion control
3090  *	@b: the beta values for congestion control
3091  *
3092  *	Initialize the congestion control parameters.
3093  */
3094 static void __devinit
3095 init_cong_ctrl(unsigned short *a, unsigned short *b)
3096 {
3097 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
3098 	a[9] = 2;
3099 	a[10] = 3;
3100 	a[11] = 4;
3101 	a[12] = 5;
3102 	a[13] = 6;
3103 	a[14] = 7;
3104 	a[15] = 8;
3105 	a[16] = 9;
3106 	a[17] = 10;
3107 	a[18] = 14;
3108 	a[19] = 17;
3109 	a[20] = 21;
3110 	a[21] = 25;
3111 	a[22] = 30;
3112 	a[23] = 35;
3113 	a[24] = 45;
3114 	a[25] = 60;
3115 	a[26] = 80;
3116 	a[27] = 100;
3117 	a[28] = 200;
3118 	a[29] = 300;
3119 	a[30] = 400;
3120 	a[31] = 500;
3121 
3122 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
3123 	b[9] = b[10] = 1;
3124 	b[11] = b[12] = 2;
3125 	b[13] = b[14] = b[15] = b[16] = 3;
3126 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
3127 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
3128 	b[28] = b[29] = 6;
3129 	b[30] = b[31] = 7;
3130 }
3131 
3132 /* The minimum additive increment value for the congestion control table */
3133 #define	CC_MIN_INCR 2U
3134 
3135 /*
3136  *	t4_load_mtus - write the MTU and congestion control HW tables
3137  *	@adap: the adapter
3138  *	@mtus: the values for the MTU table
3139  *	@alpha: the values for the congestion control alpha parameter
3140  *	@beta: the values for the congestion control beta parameter
3141  *
3142  *	Write the HW MTU table with the supplied MTUs and the high-speed
3143  *	congestion control table with the supplied alpha, beta, and MTUs.
3144  *	We write the two tables together because the additive increments
3145  *	depend on the MTUs.
3146  */
3147 void
3148 t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
3149     const unsigned short *alpha, const unsigned short *beta)
3150 {
3151 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
3152 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
3153 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
3154 		28672, 40960, 57344, 81920, 114688, 163840, 229376
3155 	};
3156 
3157 	unsigned int i, w;
3158 
3159 	for (i = 0; i < NMTUS; ++i) {
3160 		unsigned int mtu = mtus[i];
3161 		unsigned int log2 = fls(mtu);
3162 
3163 		if (!(mtu & ((1 << log2) >> 2)))	/* round */
3164 			log2--;
3165 		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
3166 		    V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
3167 
3168 		for (w = 0; w < NCCTRL_WIN; ++w) {
3169 			unsigned int inc;
3170 
3171 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
3172 			    CC_MIN_INCR);
3173 
3174 			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3175 			    (w << 16) | (beta[w] << 13) | inc);
3176 		}
3177 	}
3178 }
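
/*
 * Worked example of the rounding above (illustrative): for the common
 * MTU of 1500, fls(1500) is 11 and bit 9 (512) is clear in 1500, so
 * log2 is decremented to 10, i.e. 1500 rounds to the nearer power of
 * two (1024) and MTUWIDTH is programmed as 10.  The congestion table
 * increment for window 0 with alpha = 1 is then
 * max((1500 - 40) * 1 / 2, CC_MIN_INCR) = 730.
 */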
3179 
3180 /*
3181  *	t4_set_pace_tbl - set the pace table
3182  *	@adap: the adapter
3183  *	@pace_vals: the pace values in microseconds
3184  *	@start: index of the first entry in the HW pace table to set
3185  *	@n: how many entries to set
3186  *
3187  *	Sets (a subset of the) HW pace table.
3188  */
3189 int
3190 t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3191     unsigned int start, unsigned int n)
3192 {
3193 	unsigned int vals[NTX_SCHED], i;
3194 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3195 
3196 	if (n > NTX_SCHED)
3197 		return (-ERANGE);
3198 
3199 	/* convert values from us to dack ticks, rounding to closest value */
3200 	for (i = 0; i < n; i++, pace_vals++) {
3201 		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3202 		if (vals[i] > 0x7ff)
3203 			return (-ERANGE);
3204 		if (*pace_vals && vals[i] == 0)
3205 			return (-ERANGE);
3206 	}
3207 	for (i = 0; i < n; i++, start++)
3208 		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
3209 	return (0);
3210 }
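
/*
 * Conversion example (illustrative): with tick_ns = 200 (a 200 ns
 * DACK tick), a requested pace of 5 us becomes
 * (1000 * 5 + 100) / 200 = 25 ticks; values are rounded to the
 * nearest tick, and a non-zero request that would round down to 0
 * ticks is rejected rather than silently disabling pacing.
 */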
3211 
3212 /*
3213  *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3214  *	@adap: the adapter
3215  *	@kbps: target rate in Kbps
3216  *	@sched: the scheduler index
3217  *
3218  *	Configure a Tx HW scheduler for the target rate.
3219  */
3220 int
3221 t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3222 {
3223 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0U;
3224 	unsigned int clk = adap->params.vpd.cclk * 1000;
3225 	unsigned int selected_cpt = 0, selected_bpt = 0;
3226 
3227 	if (kbps > 0) {
3228 		kbps *= 125;	/* -> bytes */
3229 		for (cpt = 1; cpt <= 255; cpt++) {
3230 			tps = clk / cpt;
3231 			bpt = (kbps + tps / 2) / tps;
3232 			if (bpt > 0 && bpt <= 255) {
3233 				v = bpt * tps;
3234 				delta = v >= kbps ? v - kbps : kbps - v;
3235 				if (delta < mindelta) {
3236 					mindelta = delta;
3237 					selected_cpt = cpt;
3238 					selected_bpt = bpt;
3239 				}
3240 			} else if (selected_cpt != 0)
3241 				break;
3242 		}
3243 		if (!selected_cpt)
3244 			return (-EINVAL);
3245 	}
3246 	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3247 	    A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3248 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3249 	if (sched & 1)
3250 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3251 	else
3252 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3253 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3254 	return (0);
3255 }
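
/*
 * Worked example of the search above (illustrative): for a 1 Gb/s
 * target (kbps = 1000000, i.e. 125000000 bytes/s) on a 250 MHz core
 * clock (clk = 250000000), cpt = 2 gives tps = 125000000 and bpt = 1,
 * i.e. one byte every two core ticks -- an exact match, so delta is 0
 * and that (cpt, bpt) pair is selected.
 */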
3256 
3257 /*
3258  *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3259  *	@adap: the adapter
3260  *	@sched: the scheduler index
3261  *	@ipg: the interpacket delay in tenths of nanoseconds
3262  *
3263  *	Set the interpacket delay for a HW packet rate scheduler.
3264  */
3265 int
3266 t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3267 {
3268 	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3269 
3270 	/* convert ipg to nearest number of core clocks */
3271 	ipg *= core_ticks_per_usec(adap);
3272 	ipg = (ipg + 5000) / 10000;
3273 	if (ipg > M_TXTIMERSEPQ0)
3274 		return (-EINVAL);
3275 
3276 	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3277 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3278 	if (sched & 1)
3279 		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3280 	else
3281 		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3282 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3283 	(void) t4_read_reg(adap, A_TP_TM_PIO_DATA);
3284 	return (0);
3285 }
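
/*
 * Conversion example (illustrative): with a 250 MHz core clock,
 * core_ticks_per_usec() is 250, so a requested delay of 1000 (100 ns,
 * in tenths of nanoseconds) becomes (1000 * 250 + 5000) / 10000 = 25
 * core ticks, which at 4 ns per tick is the requested 100 ns.
 */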
3286 
3287 /*
3288  *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3289  *	@adap: the adapter
3290  *	@sched: the scheduler index
 *	@kbps: the bit rate in Kbps
3292  *	@ipg: the interpacket delay in tenths of nanoseconds
3293  *
3294  *	Return the current configuration of a HW Tx scheduler.
3295  */
3296 void
3297 t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
3298     unsigned int *ipg)
3299 {
3300 	unsigned int v, addr, bpt, cpt;
3301 
3302 	if (kbps != NULL) {
3303 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3304 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3305 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3306 		if (sched & 1)
3307 			v >>= 16;
3308 		bpt = (v >> 8) & 0xff;
3309 		cpt = v & 0xff;
3310 		if (!cpt)
3311 			*kbps = 0;	/* scheduler disabled */
3312 		else {
3313 			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3314 			*kbps = (v * bpt) / 125;
3315 		}
3316 	}
3317 	if (ipg != NULL) {
3318 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3319 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3320 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3321 		if (sched & 1)
3322 			v >>= 16;
3323 		v &= 0xffff;
3324 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3325 	}
3326 }
3327 
3328 /*
3329  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3330  * clocks.  The formula is
3331  *
3332  * bytes/s = bytes256 * 256 * ClkFreq / 4096
3333  *
3334  * which is equivalent to
3335  *
 * bytes/s = 62.5 * bytes256 * ClkFreq_ms,
 *
 * where ClkFreq_ms is the core clock in kHz, i.e. cycles per
 * millisecond (adap->params.vpd.cclk is stored in kHz).
3337  */
3338 static u64
3339 chan_rate(struct adapter *adap, unsigned int bytes256)
3340 {
3341 	u64 v = bytes256 * adap->params.vpd.cclk;
3342 
3343 	return (v * 62 + v / 2);
3344 }
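
/*
 * Example (illustrative): with a 250 MHz core clock
 * (adap->params.vpd.cclk = 250000, in kHz) and bytes256 = 4,
 * v = 1000000 and the function returns 62000000 + 500000 = 62500000
 * bytes/s, matching 62.5 * 4 * 250000.
 */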
3345 
3346 /*
3347  *	t4_get_chan_txrate - get the current per channel Tx rates
3348  *	@adap: the adapter
3349  *	@nic_rate: rates for NIC traffic
3350  *	@ofld_rate: rates for offloaded traffic
3351  *
3352  *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
3353  *	for each channel.
3354  */
3355 void
3356 t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
3357 {
3358 	u32 v;
3359 
3360 	v = t4_read_reg(adap, A_TP_TX_TRATE);
3361 	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3362 	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3363 	nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3364 	nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3365 
3366 	v = t4_read_reg(adap, A_TP_TX_ORATE);
3367 	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3368 	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3369 	ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3370 	ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3371 }
3372 
3373 /*
3374  *	t4_set_trace_filter - configure one of the tracing filters
3375  *	@adap: the adapter
3376  *	@tp: the desired trace filter parameters
3377  *	@idx: which filter to configure
3378  *	@enable: whether to enable or disable the filter
3379  *
3380  *	Configures one of the tracing filters available in HW.  If @enable is
3381  *	%0 @tp is not examined and may be %NULL.
3382  */
3383 int
3384 t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
3385     int idx, int enable)
3386 {
3387 	int i, ofst = idx * 4;
3388 	u32 data_reg, mask_reg, cfg;
3389 
	if (!enable) {
		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
		return (0);
	}
3394 
3395 	/*
3396 	 * TODO - After T4 data book is updated, specify the exact
3397 	 * section below.
3398 	 *
3399 	 * See T4 data book - MPS section for a complete description
3400 	 * of the below if..else handling of A_MPS_TRC_CFG register
3401 	 * value.
3402 	 */
3403 	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3404 	if (cfg & F_TRCMULTIFILTER) {
3405 		/*
3406 		 * If multiple tracers are enabled, then maximum
3407 		 * capture size is 2.5KB (FIFO size of a single channel)
3408 		 * minus 2 flits for CPL_TRACE_PKT header.
3409 		 */
		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
			return (-EINVAL);
	} else {
3414 		/*
3415 		 * If multiple tracers are disabled, to avoid deadlocks
3416 		 * maximum packet capture size of 9600 bytes is recommended.
3417 		 * Also in this mode, only trace0 can be enabled and running.
3418 		 */
		if (tp->snap_len > 9600 || idx)
			return (-EINVAL);
	}

	if (tp->port > 11 || tp->invert > 1 || tp->skip_len > M_TFLENGTH ||
	    tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE)
		return (-EINVAL);
3426 
3427 	/* stop the tracer we'll be changing */
3428 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3429 
3430 	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3431 	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3432 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
3433 
3434 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3435 		t4_write_reg(adap, data_reg, tp->data[i]);
3436 		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
3437 	}
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
	    V_TFCAPTUREMAX(tp->snap_len) | V_TFMINPKTSIZE(tp->min_len));

	/*
	 * Note the parentheses around the conditional expression: without
	 * them the "|" operators above would bind tighter than "?:" and
	 * corrupt the TFOFFSET/TFLENGTH bits.
	 */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
	    V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
	    (is_t4(adap->params.chip) ?
	    V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert) :
	    V_T5_TFPORT(tp->port) | F_T5_TFEN |
	    V_T5_TFINVERTMATCH(tp->invert)));
3446 
3447 	return (0);
3448 }
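
/*
 * Usage sketch (hypothetical values): capture the first 128 bytes of
 * every packet seen on port 0 with tracer 0.  The data/mask arrays of
 * tp are left zeroed so all packets match:
 *
 *	struct trace_params tp;
 *
 *	(void) memset(&tp, 0, sizeof (tp));
 *	tp.snap_len = 128;
 *	tp.port = 0;
 *	if (t4_set_trace_filter(adap, &tp, 0, 1) != 0)
 *		... parameters rejected ...
 */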
3449 
3450 /*
3451  *	t4_get_trace_filter - query one of the tracing filters
3452  *	@adap: the adapter
3453  *	@tp: the current trace filter parameters
3454  *	@idx: which trace filter to query
3455  *	@enabled: non-zero if the filter is enabled
3456  *
3457  *	Returns the current settings of one of the HW tracing filters.
3458  */
3459 void
3460 t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3461     int *enabled)
3462 {
3463 	u32 ctla, ctlb;
3464 	int i, ofst = idx * 4;
3465 	u32 data_reg, mask_reg;
3466 
3467 	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3468 	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3469 
	if (is_t4(adap->params.chip)) {
		*enabled = !!(ctla & F_TFEN);
		tp->port = G_TFPORT(ctla);
		tp->invert = !!(ctla & F_TFINVERTMATCH);
	} else {
		*enabled = !!(ctla & F_T5_TFEN);
		tp->port = G_T5_TFPORT(ctla);
		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
	}
	tp->snap_len = G_TFCAPTUREMAX(ctlb);
	tp->min_len = G_TFMINPKTSIZE(ctlb);
	tp->skip_ofst = G_TFOFFSET(ctla);
	tp->skip_len = G_TFLENGTH(ctla);
3482 
3483 	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3484 	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3485 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3486 
3487 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3488 		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3489 		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3490 	}
3491 }
3492 
3493 /*
3494  *	t4_pmtx_get_stats - returns the HW stats from PMTX
3495  *	@adap: the adapter
3496  *	@cnt: where to store the count statistics
3497  *	@cycles: where to store the cycle statistics
3498  *
3499  *	Returns performance statistics from PMTX.
3500  */
3501 void
3502 t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3503 {
3504 	int i;
3505 	u32 data[2];
3506 
3507 	for (i = 0; i < PM_NSTATS; i++) {
3508 		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3509 		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3510 		if (is_t4(adap->params.chip))
3511 			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3512 		else {
3513 			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
3514 			    A_PM_TX_DBG_DATA, data, 2,
3515 			    A_PM_TX_DBG_STAT_MSB);
3516 			cycles[i] = (((u64)data[0] << 32) | data[1]);
3517 		}
3518 	}
3519 }
3520 
3521 /*
3522  *	t4_pmrx_get_stats - returns the HW stats from PMRX
3523  *	@adap: the adapter
3524  *	@cnt: where to store the count statistics
3525  *	@cycles: where to store the cycle statistics
3526  *
3527  *	Returns performance statistics from PMRX.
3528  */
3529 void
3530 t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3531 {
3532 	int i;
3533 	u32 data[2];
3534 
3535 	for (i = 0; i < PM_NSTATS; i++) {
3536 		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3537 		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3538 		if (is_t4(adap->params.chip))
3539 			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3540 		else {
3541 			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
3542 			    A_PM_RX_DBG_DATA, data, 2,
3543 			    A_PM_RX_DBG_STAT_MSB);
3544 			cycles[i] = (((u64)data[0] << 32) | data[1]);
3545 		}
3546 	}
3547 }
3548 
3549 /*
3550  *	get_mps_bg_map - return the buffer groups associated with a port
3551  *	@adap: the adapter
3552  *	@idx: the port index
3553  *
3554  *	Returns a bitmap indicating which MPS buffer groups are associated
3555  *	with the given port.  Bit i is set if buffer group i is used by the
3556  *	port.
3557  */
3558 static unsigned int
3559 get_mps_bg_map(struct adapter *adap, int idx)
3560 {
3561 	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3562 
3563 	if (n == 0)
3564 		return (idx == 0 ? 0xf : 0);
3565 	if (n == 1)
3566 		return (idx < 2 ? (3 << (2 * idx)) : 0);
3567 	return (1 << idx);
3568 }
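
/*
 * Spelled out, the mapping above is (NUMPORTS field value -> layout):
 *
 *	0 (one port):    port 0 owns buffer groups 0-3 (map 0xf)
 *	1 (two ports):   port 0 owns BGs 0-1 (0x3), port 1 owns BGs 2-3 (0xc)
 *	other (four):    port i owns buffer group i (map 1 << i)
 */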
3569 
3570 /*
3571  *	t4_get_port_stats - collect port statistics
3572  *	@adap: the adapter
3573  *	@idx: the port index
3574  *	@p: the stats structure to fill
3575  *
3576  *	Collect statistics related to the given port from HW.
3577  */
3578 void
3579 t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3580 {
3581 	u32 bgmap = get_mps_bg_map(adap, idx);
3582 
3583 #define	GET_STAT(name) \
3584 	t4_read_reg64(adap, \
3585 	(is_t4(adap->params.chip) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
3586 	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
3587 #define	GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3588 
3589 	p->tx_pause		= GET_STAT(TX_PORT_PAUSE);
3590 	p->tx_octets		= GET_STAT(TX_PORT_BYTES);
3591 	p->tx_frames		= GET_STAT(TX_PORT_FRAMES);
3592 	p->tx_bcast_frames	= GET_STAT(TX_PORT_BCAST);
3593 	p->tx_mcast_frames	= GET_STAT(TX_PORT_MCAST);
3594 	p->tx_ucast_frames	= GET_STAT(TX_PORT_UCAST);
3595 	p->tx_error_frames	= GET_STAT(TX_PORT_ERROR);
3596 	p->tx_frames_64		= GET_STAT(TX_PORT_64B);
3597 	p->tx_frames_65_127	= GET_STAT(TX_PORT_65B_127B);
3598 	p->tx_frames_128_255	= GET_STAT(TX_PORT_128B_255B);
3599 	p->tx_frames_256_511	= GET_STAT(TX_PORT_256B_511B);
3600 	p->tx_frames_512_1023	= GET_STAT(TX_PORT_512B_1023B);
3601 	p->tx_frames_1024_1518	= GET_STAT(TX_PORT_1024B_1518B);
3602 	p->tx_frames_1519_max	= GET_STAT(TX_PORT_1519B_MAX);
3603 	p->tx_drop		= GET_STAT(TX_PORT_DROP);
3604 	p->tx_ppp0		= GET_STAT(TX_PORT_PPP0);
3605 	p->tx_ppp1		= GET_STAT(TX_PORT_PPP1);
3606 	p->tx_ppp2		= GET_STAT(TX_PORT_PPP2);
3607 	p->tx_ppp3		= GET_STAT(TX_PORT_PPP3);
3608 	p->tx_ppp4		= GET_STAT(TX_PORT_PPP4);
3609 	p->tx_ppp5		= GET_STAT(TX_PORT_PPP5);
3610 	p->tx_ppp6		= GET_STAT(TX_PORT_PPP6);
3611 	p->tx_ppp7		= GET_STAT(TX_PORT_PPP7);
3612 
3613 	p->rx_pause		= GET_STAT(RX_PORT_PAUSE);
3614 	p->rx_octets		= GET_STAT(RX_PORT_BYTES);
3615 	p->rx_frames		= GET_STAT(RX_PORT_FRAMES);
3616 	p->rx_bcast_frames	= GET_STAT(RX_PORT_BCAST);
3617 	p->rx_mcast_frames	= GET_STAT(RX_PORT_MCAST);
3618 	p->rx_ucast_frames	= GET_STAT(RX_PORT_UCAST);
3619 	p->rx_too_long		= GET_STAT(RX_PORT_MTU_ERROR);
3620 	p->rx_jabber		= GET_STAT(RX_PORT_MTU_CRC_ERROR);
3621 	p->rx_fcs_err		= GET_STAT(RX_PORT_CRC_ERROR);
3622 	p->rx_len_err		= GET_STAT(RX_PORT_LEN_ERROR);
3623 	p->rx_symbol_err	= GET_STAT(RX_PORT_SYM_ERROR);
3624 	p->rx_runt		= GET_STAT(RX_PORT_LESS_64B);
3625 	p->rx_frames_64		= GET_STAT(RX_PORT_64B);
3626 	p->rx_frames_65_127	= GET_STAT(RX_PORT_65B_127B);
3627 	p->rx_frames_128_255	= GET_STAT(RX_PORT_128B_255B);
3628 	p->rx_frames_256_511	= GET_STAT(RX_PORT_256B_511B);
3629 	p->rx_frames_512_1023	= GET_STAT(RX_PORT_512B_1023B);
3630 	p->rx_frames_1024_1518	= GET_STAT(RX_PORT_1024B_1518B);
3631 	p->rx_frames_1519_max	= GET_STAT(RX_PORT_1519B_MAX);
3632 	p->rx_ppp0		= GET_STAT(RX_PORT_PPP0);
3633 	p->rx_ppp1		= GET_STAT(RX_PORT_PPP1);
3634 	p->rx_ppp2		= GET_STAT(RX_PORT_PPP2);
3635 	p->rx_ppp3		= GET_STAT(RX_PORT_PPP3);
3636 	p->rx_ppp4		= GET_STAT(RX_PORT_PPP4);
3637 	p->rx_ppp5		= GET_STAT(RX_PORT_PPP5);
3638 	p->rx_ppp6		= GET_STAT(RX_PORT_PPP6);
3639 	p->rx_ppp7		= GET_STAT(RX_PORT_PPP7);
3640 
3641 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3642 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3643 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3644 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3645 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3646 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3647 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3648 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3649 
3650 #undef GET_STAT
3651 #undef GET_STAT_COM
3652 }
3653 
3654 /*
3655  *	t4_clr_port_stats - clear port statistics
3656  *	@adap: the adapter
3657  *	@idx: the port index
3658  *
3659  *	Clear HW statistics for the given port.
3660  */
3661 void
3662 t4_clr_port_stats(struct adapter *adap, int idx)
3663 {
3664 	unsigned int i;
3665 	u32 bgmap = get_mps_bg_map(adap, idx);
3666 	u32 port_base_addr;
3667 
3668 	if (is_t4(adap->params.chip))
3669 		port_base_addr = PORT_BASE(idx);
3670 	else
		port_base_addr = T5_PORT_BASE(idx);

3674 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3675 	    i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3676 		t4_write_reg(adap, port_base_addr + i, 0);
3677 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3678 	    i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3679 		t4_write_reg(adap, port_base_addr + i, 0);
3680 	for (i = 0; i < 4; i++)
3681 		if (bgmap & (1 << i)) {
3682 			t4_write_reg(adap,
3683 			    A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
3684 			t4_write_reg(adap,
3685 			    A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
3686 		}
3687 }
3688 
3689 /*
3690  *	t4_get_lb_stats - collect loopback port statistics
3691  *	@adap: the adapter
3692  *	@idx: the loopback port index
3693  *	@p: the stats structure to fill
3694  *
3695  *	Return HW statistics for the given loopback port.
3696  */
3697 void
3698 t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
3699 {
3700 	u32 bgmap = get_mps_bg_map(adap, idx);
3701 
3702 #define	GET_STAT(name) \
3703 	t4_read_reg64(adap, \
3704 	(is_t4(adap->params.chip) ? \
3705 	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
3706 	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
3707 #define	GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3708 
3709 	p->octets		= GET_STAT(BYTES);
3710 	p->frames		= GET_STAT(FRAMES);
3711 	p->bcast_frames		= GET_STAT(BCAST);
3712 	p->mcast_frames		= GET_STAT(MCAST);
3713 	p->ucast_frames		= GET_STAT(UCAST);
3714 	p->error_frames		= GET_STAT(ERROR);
3715 
3716 	p->frames_64		= GET_STAT(64B);
3717 	p->frames_65_127	= GET_STAT(65B_127B);
3718 	p->frames_128_255	= GET_STAT(128B_255B);
3719 	p->frames_256_511	= GET_STAT(256B_511B);
3720 	p->frames_512_1023	= GET_STAT(512B_1023B);
3721 	p->frames_1024_1518	= GET_STAT(1024B_1518B);
3722 	p->frames_1519_max	= GET_STAT(1519B_MAX);
3723 	p->drop			= GET_STAT(DROP_FRAMES);
3724 
3725 	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
3726 	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
3727 	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
3728 	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
3729 	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
3730 	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
3731 	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
3732 	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
3733 
3734 #undef GET_STAT
3735 #undef GET_STAT_COM
3736 }
3737 
3738 /*
3739  *	t4_wol_magic_enable - enable/disable magic packet WoL
3740  *	@adap: the adapter
3741  *	@port: the physical port index
3742  *	@addr: MAC address expected in magic packets, %NULL to disable
3743  *
3744  *	Enables/disables magic packet wake-on-LAN for the selected port.
3745  */
3746 void
3747 t4_wol_magic_enable(struct adapter *adap, unsigned int port, const u8 *addr)
3748 {
3749 	if (addr != NULL) {
3750 		t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO),
3751 		    (addr[2] << 24) | (addr[3] << 16) |
3752 		    (addr[4] << 8) | addr[5]);
3753 		t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI),
3754 		    (addr[0] << 8) | addr[1]);
3755 	}
3756 	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), F_MAGICEN,
3757 	    V_MAGICEN(addr != NULL));
3758 }
3759 
3760 /*
3761  *	t4_wol_pat_enable - enable/disable pattern-based WoL
3762  *	@adap: the adapter
3763  *	@port: the physical port index
3764  *	@map: bitmap of which HW pattern filters to set
3765  *	@mask0: byte mask for bytes 0-63 of a packet
3766  *	@mask1: byte mask for bytes 64-127 of a packet
3767  *	@crc: Ethernet CRC for selected bytes
3768  *	@enable: enable/disable switch
3769  *
3770  *	Sets the pattern filters indicated in @map to mask out the bytes
3771  *	specified in @mask0/@mask1 in received packets and compare the CRC of
3772  *	the resulting packet against @crc.  If @enable is %true pattern-based
3773  *	WoL is enabled, otherwise disabled.
3774  */
3775 int
3776 t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
3777     u64 mask0, u64 mask1, unsigned int crc, bool enable)
3778 {
3779 	int i;
3780 	u32 port_cfg_reg;
3781 
3782 	if (is_t4(adap->params.chip))
3783 		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
3784 	else
3785 		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
3786 
3787 	if (!enable) {
3788 		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
3789 		return (0);
3790 	}
3791 	if (map > 0xff)
3792 		return (-EINVAL);

#define	EPIO_REG(name) \
3796 	(is_t4(adap->params.chip) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
3797 	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
3798 
3799 	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
3800 	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
3801 	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
3802 
3803 	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
3804 		if (!(map & 1))
3805 			continue;
3806 
3807 		/* write byte masks */
3808 		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
3809 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
3810 		(void) t4_read_reg(adap, EPIO_REG(OP));		/* flush */
3811 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
3812 			return (-ETIMEDOUT);
3813 
3814 		/* write CRC */
3815 		t4_write_reg(adap, EPIO_REG(DATA0), crc);
3816 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
3817 		(void) t4_read_reg(adap, EPIO_REG(OP));		/* flush */
3818 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
3819 			return (-ETIMEDOUT);
3820 	}
3821 #undef EPIO_REG
3822 
3823 	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
3824 	return (0);
3825 }
3826 
3827 /*
3828  *	t4_mk_filtdelwr - create a delete filter WR
3829  *	@ftid: the filter ID
3830  *	@wr: the filter work request to populate
3831  *	@qid: ingress queue to receive the delete notification
3832  *
3833  *	Creates a filter work request to delete the supplied filter.  If @qid is
3834  *	negative the delete notification is suppressed.
3835  */
3836 void
3837 t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
3838 {
3839 	(void) memset(wr, 0, sizeof (*wr));
3840 	wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
3841 	wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof (*wr) / 16));
3842 	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
3843 	    V_FW_FILTER_WR_NOREPLY(qid < 0));
3844 	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
3845 	if (qid >= 0)
3846 		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
3847 }
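
/*
 * Usage sketch (hypothetical IDs): build a delete-filter work request
 * for filter 100 with the completion steered to ingress queue rspq_id.
 * Note the WR is only constructed here; the caller still has to hand it
 * to the hardware, e.g. via a control queue:
 *
 *	struct fw_filter_wr wr;
 *
 *	t4_mk_filtdelwr(100, &wr, rspq_id);
 */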
3848 
3849 #define	INIT_CMD(var, cmd, rd_wr) do { \
3850 	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
3851 	    F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
3852 	(var).retval_len16 = htonl(FW_LEN16(var)); \
3853 } while (0)
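
/*
 * For example, INIT_CMD(c, BYE, WRITE) expands to:
 *
 *	c.op_to_write = htonl(V_FW_CMD_OP(FW_BYE_CMD) |
 *	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 */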
3854 
3855 /*
3856  *	t4_mdio_rd - read a PHY register through MDIO
3857  *	@adap: the adapter
3858  *	@mbox: mailbox to use for the FW command
3859  *	@phy_addr: the PHY address
3860  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
3861  *	@reg: the register to read
3862  *	@valp: where to store the value
3863  *
3864  *	Issues a FW command through the given mailbox to read a PHY register.
3865  */
3866 int
3867 t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3868     unsigned int mmd, unsigned int reg, unsigned int *valp)
3869 {
3870 	int ret;
3871 	struct fw_ldst_cmd c;
3872 
3873 	(void) memset(&c, 0, sizeof (c));
3874 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3875 	    F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3876 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3877 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3878 	    V_FW_LDST_CMD_MMD(mmd));
3879 	c.u.mdio.raddr = htons(reg);
3880 
3881 	ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
3882 	if (ret == 0)
3883 		*valp = ntohs(c.u.mdio.rval);
3884 	return (ret);
3885 }
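
/*
 * Usage sketch (hypothetical addresses): read the PMA/PMD control
 * register (MMD 1, register 0) of a clause-45 PHY at address 0,
 * assuming the caller's mailbox is in "mbox":
 *
 *	unsigned int val;
 *
 *	if (t4_mdio_rd(adap, mbox, 0, 1, 0, &val) == 0)
 *		... val now holds the 16-bit register contents ...
 */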
3886 
3887 /*
3888  *	t4_mdio_wr - write a PHY register through MDIO
3889  *	@adap: the adapter
3890  *	@mbox: mailbox to use for the FW command
3891  *	@phy_addr: the PHY address
3892  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
3893  *	@reg: the register to write
3894  *	@valp: value to write
3895  *
3896  *	Issues a FW command through the given mailbox to write a PHY register.
3897  */
3898 int
3899 t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3900     unsigned int mmd, unsigned int reg, unsigned int val)
3901 {
3902 	struct fw_ldst_cmd c;
3903 
3904 	(void) memset(&c, 0, sizeof (c));
3905 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3906 	    F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3907 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3908 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3909 	    V_FW_LDST_CMD_MMD(mmd));
3910 	c.u.mdio.raddr = htons(reg);
3911 	c.u.mdio.rval = htons(val);
3912 
3913 	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
3914 }
3915 
3916 /*
3917  *	t4_sge_ctxt_rd - read an SGE context through FW
3918  *	@adap: the adapter
3919  *	@mbox: mailbox to use for the FW command
3920  *	@cid: the context id
3921  *	@ctype: the context type
3922  *	@data: where to store the context data
3923  *
3924  *	Issues a FW command through the given mailbox to read an SGE context.
3925  */
3926 int
3927 t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
3928     enum ctxt_type ctype, u32 *data)
3929 {
3930 	int ret;
3931 	struct fw_ldst_cmd c;
3932 
3933 	if (ctype == CTXT_EGRESS)
3934 		ret = FW_LDST_ADDRSPC_SGE_EGRC;
3935 	else if (ctype == CTXT_INGRESS)
3936 		ret = FW_LDST_ADDRSPC_SGE_INGC;
3937 	else if (ctype == CTXT_FLM)
3938 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
3939 	else
3940 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
3941 
3942 	(void) memset(&c, 0, sizeof (c));
3943 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3944 	    F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
3945 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3946 	c.u.idctxt.physid = htonl(cid);
3947 
3948 	ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
3949 	if (ret == 0) {
3950 		data[0] = ntohl(c.u.idctxt.ctxt_data0);
3951 		data[1] = ntohl(c.u.idctxt.ctxt_data1);
3952 		data[2] = ntohl(c.u.idctxt.ctxt_data2);
3953 		data[3] = ntohl(c.u.idctxt.ctxt_data3);
3954 		data[4] = ntohl(c.u.idctxt.ctxt_data4);
3955 		data[5] = ntohl(c.u.idctxt.ctxt_data5);
3956 	}
3957 	return (ret);
3958 }
3959 
3960 /*
3961  *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
3962  *	@adap: the adapter
3963  *	@cid: the context id
3964  *	@ctype: the context type
3965  *	@data: where to store the context data
3966  *
3967  *	Reads an SGE context directly, bypassing FW.  This is only for
3968  *	debugging when FW is unavailable.
3969  */
3970 int
3971 t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
3972     u32 *data)
3973 {
3974 	int i, ret;
3975 
3976 	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
3977 	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
3978 	if (!ret)
3979 		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
3980 			*data++ = t4_read_reg(adap, i);
3981 	return (ret);
3982 }
3983 
3984 /*
3985  *	t4_fw_hello - establish communication with FW
3986  *	@adap: the adapter
3987  *	@mbox: mailbox to use for the FW command
3988  *	@evt_mbox: mailbox to receive async FW events
3989  *	@master: specifies the caller's willingness to be the device master
3990  *	@state: returns the current device state (if non-NULL)
 *
3992  *	Issues a command to establish communication with FW.  Returns either
3993  *	an error (negative integer) or the mailbox of the Master PF.
3994  */
3995 int
3996 t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
3997     enum dev_master master, enum dev_state *state)
3998 {
3999 	int ret;
4000 	struct fw_hello_cmd c;
4001 	u32 v;
4002 	unsigned int master_mbox;
4003 	int retries = FW_CMD_HELLO_RETRIES;
4004 
4005 retry:
4006 	(void) memset(&c, 0, sizeof (c));
4007 	/* LINTED: E_CONSTANT_CONDITION */
4008 	INIT_CMD(c, HELLO, WRITE);
4009 	c.err_to_clearinit = htonl(
4010 	    V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
4011 	    V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
4012 	    V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
4013 	    M_FW_HELLO_CMD_MBMASTER) |
4014 	    V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
4015 	    V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
4016 	    F_FW_HELLO_CMD_CLEARINIT);
4017 
4018 	/*
4019 	 * Issue the HELLO command to the firmware.  If it's not successful
4020 	 * but indicates that we got a "busy" or "timeout" condition, retry
4021 	 * the HELLO until we exhaust our retry limit.  If we do exceed our
4022 	 * retry limit, check to see if the firmware left us any error
4023 	 * information and report that if so ...
4024 	 */
4025 	ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
4026 	if (ret != FW_SUCCESS) {
4027 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
4028 			goto retry;
4029 		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
4030 			t4_report_fw_error(adap);
4031 		return (ret);
4032 	}
4033 
4034 	v = ntohl(c.err_to_clearinit);
4035 	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
4036 	if (state != NULL) {
4037 		if (v & F_FW_HELLO_CMD_ERR)
4038 			*state = DEV_STATE_ERR;
4039 		else if (v & F_FW_HELLO_CMD_INIT)
4040 			*state = DEV_STATE_INIT;
4041 		else
4042 			*state = DEV_STATE_UNINIT;
4043 	}
4044 
4045 	/*
4046 	 * If we're not the Master PF then we need to wait around for the
4047 	 * Master PF Driver to finish setting up the adapter.
4048 	 *
4049 	 * Note that we also do this wait if we're a non-Master-capable PF and
4050 	 * there is no current Master PF; a Master PF may show up momentarily
4051 	 * and we wouldn't want to fail pointlessly.  (This can happen when an
4052 	 * OS loads lots of different drivers rapidly at the same time).  In
4053 	 * this case, the Master PF returned by the firmware will be
4054 	 * M_PCIE_FW_MASTER so the test below will work ...
4055 	 */
4056 	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
4057 	    master_mbox != mbox) {
4058 		int waiting = FW_CMD_HELLO_TIMEOUT;
4059 
4060 		/*
4061 		 * Wait for the firmware to either indicate an error or
4062 		 * initialized state.  If we see either of these we bail out
4063 		 * and report the issue to the caller.  If we exhaust the
4064 		 * "hello timeout" and we haven't exhausted our retries, try
4065 		 * again.  Otherwise bail with a timeout error.
4066 		 */
4067 		for (;;) {
4068 			u32 pcie_fw;
4069 
4070 			msleep(50);
4071 			waiting -= 50;
4072 
4073 			/*
			 * If neither Error nor Initialized are indicated
4075 			 * by the firmware keep waiting till we exhaust our
4076 			 * timeout ... and then retry if we haven't exhausted
4077 			 * our retries ...
4078 			 */
4079 			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
4080 			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
4081 				if (waiting <= 0) {
4082 					if (retries-- > 0)
4083 						goto retry;
4084 
4085 					return (-ETIMEDOUT);
4086 				}
4087 				continue;
4088 			}
4089 
			/*
			 * We either have an Error or Initialized condition;
			 * report errors preferentially.
			 */
4094 			if (state != NULL) {
4095 				if (pcie_fw & F_PCIE_FW_ERR)
4096 					*state = DEV_STATE_ERR;
4097 				else if (pcie_fw & F_PCIE_FW_INIT)
4098 					*state = DEV_STATE_INIT;
4099 			}
4100 
4101 			/*
4102 			 * If we arrived before a Master PF was selected and
			 * there's now a valid Master PF, grab its identity
4104 			 * for our caller.
4105 			 */
4106 			if (master_mbox == M_PCIE_FW_MASTER &&
4107 			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
4108 				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
4109 			break;
4110 		}
4111 	}
4112 
4113 	return (master_mbox);
4114 }
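
/*
 * Usage sketch: a typical attach path offers to be the master and then
 * branches on the returned master mailbox and device state ("mbox" is
 * the caller's mailbox; error handling elided):
 *
 *	enum dev_state state;
 *	int master;
 *
 *	master = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);
 *	if (master < 0)
 *		... couldn't establish contact with the firmware ...
 *	else if (master == mbox)
 *		... we are the master PF and may initialize the device ...
 *	else
 *		... wait for the master PF to finish initialization ...
 */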
4115 
4116 /*
4117  *	t4_fw_bye - end communication with FW
4118  *	@adap: the adapter
4119  *	@mbox: mailbox to use for the FW command
4120  *
4121  *	Issues a command to terminate communication with FW.
4122  */
4123 int
4124 t4_fw_bye(struct adapter *adap, unsigned int mbox)
4125 {
4126 	struct fw_bye_cmd c;
4127 
4128 	(void) memset(&c, 0, sizeof (c));
4129 	/* LINTED: E_CONSTANT_CONDITION */
4130 	INIT_CMD(c, BYE, WRITE);
4131 	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4132 }
4133 
4134 /*
 *	t4_early_init - ask FW to initialize the device
4136  *	@adap: the adapter
4137  *	@mbox: mailbox to use for the FW command
4138  *
4139  *	Issues a command to FW to partially initialize the device.  This
4140  *	performs initialization that generally doesn't depend on user input.
4141  */
4142 int
4143 t4_early_init(struct adapter *adap, unsigned int mbox)
4144 {
4145 	struct fw_initialize_cmd c;
4146 
4147 	(void) memset(&c, 0, sizeof (c));
4148 	/* LINTED: E_CONSTANT_CONDITION */
4149 	INIT_CMD(c, INITIALIZE, WRITE);
4150 	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4151 }
4152 
4153 /*
4154  *	t4_fw_reset - issue a reset to FW
4155  *	@adap: the adapter
4156  *	@mbox: mailbox to use for the FW command
4157  *	@reset: specifies the type of reset to perform
4158  *
4159  *	Issues a reset command of the specified type to FW.
4160  */
4161 int
4162 t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
4163 {
4164 	struct fw_reset_cmd c;
4165 
4166 	(void) memset(&c, 0, sizeof (c));
4167 	/* LINTED: E_CONSTANT_CONDITION */
4168 	INIT_CMD(c, RESET, WRITE);
4169 	c.val = htonl(reset);
4170 	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4171 }
4172 
4173 /*
4174  *	t4_fw_config_file - setup an adapter via a Configuration File
4175  *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
4177  *	@mtype: the memory type where the Configuration File is located
4178  *	@maddr: the memory address where the Configuration File is located
4179  *	@finiver: return value for CF [fini] version
4180  *	@finicsum: return value for CF [fini] checksum
4181  *	@cfcsum: return value for CF computed checksum
4182  *
 *	Issue a command to get the firmware to process the Configuration
 *	File located at the specified mtype/maddr.  If the Configuration
 *	File is processed successfully and return value pointers are
 *	provided, the Configuration File "[fini]" section version and
 *	checksum values will be returned along with the computed checksum.
 *	It's up to the caller to decide how it wants to respond to the
 *	checksums not matching, but it is recommended that a prominent
 *	warning be emitted to help people rapidly identify changed or
 *	corrupted Configuration Files.
 *
 *	Also note that it's possible to modify things like "niccaps",
 *	"toecaps", etc. between processing the Configuration File and telling
 *	the firmware to use the new configuration.  Callers who want to do
 *	this will need to "hand-roll" their own CAPS_CONFIGS commands.
4198  */
4199 int
4200 t4_fw_config_file(struct adapter *adap, unsigned int mbox, unsigned int mtype,
4201     unsigned int maddr, u32 *finiver, u32 *finicsum, u32 *cfcsum)
4202 {
4203 	struct fw_caps_config_cmd caps_cmd;
4204 	int ret;
4205 
4206 	/*
4207 	 * Tell the firmware to process the indicated Configuration File.
4208 	 * If there are no errors and the caller has provided return value
4209 	 * pointers for the [fini] section version, checksum and computed
4210 	 * checksum, pass those back to the caller.
4211 	 */
4212 	(void) memset(&caps_cmd, 0, sizeof (caps_cmd));
4213 	caps_cmd.op_to_write =
4214 	    htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4215 	    F_FW_CMD_REQUEST |
4216 	    F_FW_CMD_READ);
4217 	caps_cmd.cfvalid_to_len16 =
4218 	    htonl(F_FW_CAPS_CONFIG_CMD_CFVALID |
4219 	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4220 	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
4221 	    FW_LEN16(caps_cmd));
4222 	ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof (caps_cmd), &caps_cmd);
4223 	if (ret < 0)
4224 		return (ret);
4225 
4226 	if (finiver != NULL)
4227 		*finiver = ntohl(caps_cmd.finiver);
4228 	if (finicsum != NULL)
4229 		*finicsum = ntohl(caps_cmd.finicsum);
4230 	if (cfcsum != NULL)
4231 		*cfcsum = ntohl(caps_cmd.cfcsum);
4232 
4233 	/*
4234 	 * And now tell the firmware to use the configuration we just loaded.
4235 	 */
4236 	caps_cmd.op_to_write =
4237 	    htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4238 	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
4239 	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4240 	return (t4_wr_mbox(adap, mbox, &caps_cmd, sizeof (caps_cmd), NULL));
4241 }
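
/*
 * Usage sketch: process a Configuration File previously copied into
 * adapter memory and warn if its [fini] checksum doesn't match the
 * computed one (mtype/maddr come from the caller):
 *
 *	u32 finiver, finicsum, cfcsum;
 *	int rc;
 *
 *	rc = t4_fw_config_file(adap, mbox, mtype, maddr, &finiver,
 *	    &finicsum, &cfcsum);
 *	if (rc == 0 && finicsum != cfcsum)
 *		... emit a prominent checksum-mismatch warning ...
 */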
4242 
4243 /*
4244  *	t4_fixup_host_params - fix up host-dependent parameters
4245  *	@adap: the adapter
4246  *	@page_size: the host's Base Page Size
4247  *	@cache_line_size: the host's Cache Line Size
4248  *
4249  *	Various registers in T4 contain values which are dependent on the
4250  *	host's Base Page and Cache Line Sizes.  This function will fix all of
4251  *	those registers with the appropriate values as passed in ...
4252  */
4253 int
4254 t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
4255     unsigned int cache_line_size)
4256 {
4257 	unsigned int page_shift = fls(page_size) - 1;
4258 	unsigned int sge_hps = page_shift - 10;
4259 	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
4260 	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
4261 	unsigned int fl_align_log = fls(fl_align) - 1;
4262 
4263 	t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
4264 	    V_HOSTPAGESIZEPF0(sge_hps) |
4265 	    V_HOSTPAGESIZEPF1(sge_hps) |
4266 	    V_HOSTPAGESIZEPF2(sge_hps) |
4267 	    V_HOSTPAGESIZEPF3(sge_hps) |
4268 	    V_HOSTPAGESIZEPF4(sge_hps) |
4269 	    V_HOSTPAGESIZEPF5(sge_hps) |
4270 	    V_HOSTPAGESIZEPF6(sge_hps) |
4271 	    V_HOSTPAGESIZEPF7(sge_hps));
4272 
4273 	t4_set_reg_field(adap, A_SGE_CONTROL,
4274 	    V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
4275 	    F_EGRSTATUSPAGESIZE,
4276 	    V_INGPADBOUNDARY(fl_align_log - 5) |
4277 	    V_EGRSTATUSPAGESIZE(stat_len != 64));
4278 
4279 	/*
4280 	 * Adjust various SGE Free List Host Buffer Sizes.
4281 	 *
4282 	 * This is something of a crock since we're using fixed indices into
4283 	 * the array which are also known by the sge.c code and the T4
4284 	 * Firmware Configuration File.  We need to come up with a much better
4285 	 * approach to managing this array.  For now, the first four entries
4286 	 * are:
4287 	 *
4288 	 *   0: Host Page Size
4289 	 *   1: 64KB
4290 	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
4291 	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
4292 	 *
4293 	 * For the single-MTU buffers in unpacked mode we need to include
4294 	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
4295 	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
4297 	 * Default Firmware Configuration File but we need to adjust it for
4298 	 * this host's cache line size.
4299 	 */
4300 	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
4301 	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
4302 	    (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align-1) &
4303 	    ~(fl_align-1));
4304 	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
4305 	    (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align-1) &
4306 	    ~(fl_align-1));
4307 
4308 	t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));
4309 
4310 	return (0);
4311 }
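
/*
 * Worked example (typical x86 host): page_size = 4096 gives
 * page_shift = 12 and sge_hps = 2; cache_line_size = 64 gives
 * stat_len = 64, fl_align = 64 and fl_align_log = 6, so the SGE is
 * programmed with INGPADBOUNDARY = 1, i.e. a 64-byte pad boundary,
 * since that field encodes 32 << value.
 */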
4312 
4313 /*
 *	t4_fw_initialize - ask FW to initialize the device
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to FW to partially initialize the device.  This
 *	performs initialization that generally doesn't depend on user input.
4320  */
4321 int
4322 t4_fw_initialize(struct adapter *adap, unsigned int mbox)
4323 {
4324 	struct fw_initialize_cmd c;
4325 
4326 	(void) memset(&c, 0, sizeof (c));
4327 	/* LINTED: E_CONSTANT_CONDITION */
4328 	INIT_CMD(c, INITIALIZE, WRITE);
4329 	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4330 }
4331 
4332 /*
4333  *	t4_query_params - query FW or device parameters
4334  *	@adap: the adapter
4335  *	@mbox: mailbox to use for the FW command
4336  *	@pf: the PF
4337  *	@vf: the VF
4338  *	@nparams: the number of parameters
4339  *	@params: the parameter names
4340  *	@val: the parameter values
4341  *
4342  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
4343  *	queried at once.
4344  */
4345 int
4346 t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4347     unsigned int vf, unsigned int nparams, const u32 *params, u32 *val)
4348 {
4349 	int i, ret;
4350 	struct fw_params_cmd c;
4351 	__be32 *p = &c.param[0].mnem;
4352 
4353 	if (nparams > 7)
4354 		return (-EINVAL);
4355 
4356 	(void) memset(&c, 0, sizeof (c));
4357 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4358 	    F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
4359 	    V_FW_PARAMS_CMD_VFN(vf));
4360 	c.retval_len16 = htonl(FW_LEN16(c));
4361 
4362 	for (i = 0; i < nparams; i++, p += 2, params++)
4363 		*p = htonl(*params);
4364 
4365 	ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
4366 	if (ret == 0)
4367 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
4368 			*val++ = ntohl(*p);
4369 	return (ret);
4370 }
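
/*
 * Usage sketch: query a single device parameter, here the port vector,
 * using the parameter-mnemonic macros from t4fw_interface.h:
 *
 *	u32 param, val;
 *	int rc;
 *
 *	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
 *	rc = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
 *	if (rc == 0)
 *		... val is a bitmap of the ports available to this PF ...
 */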
4371 
4372 /*
4373  *	t4_set_params - sets FW or device parameters
4374  *	@adap: the adapter
4375  *	@mbox: mailbox to use for the FW command
4376  *	@pf: the PF
4377  *	@vf: the VF
4378  *	@nparams: the number of parameters
4379  *	@params: the parameter names
4380  *	@val: the parameter values
4381  *
4382  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
4383  *	specified at once.
4384  */
4385 int
4386 t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
4387     unsigned int vf, unsigned int nparams, const u32 *params, const u32 *val)
4388 {
4389 	struct fw_params_cmd c;
4390 	__be32 *p = &c.param[0].mnem;
4391 
4392 	if (nparams > 7)
4393 		return (-EINVAL);
4394 
4395 	(void) memset(&c, 0, sizeof (c));
4396 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4397 	    F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4398 	    V_FW_PARAMS_CMD_VFN(vf));
4399 	c.retval_len16 = htonl(FW_LEN16(c));
4400 
4401 	while (nparams--) {
4402 		*p++ = htonl(*params);
4403 		params++;
4404 		*p++ = htonl(*val);
4405 		val++;
4406 	}
4407 
4408 	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4409 }
4410 
4411 /*
4412  *	t4_cfg_pfvf - configure PF/VF resource limits
4413  *	@adap: the adapter
4414  *	@mbox: mailbox to use for the FW command
4415  *	@pf: the PF being configured
4416  *	@vf: the VF being configured
4417  *	@txq: the max number of egress queues
4418  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
4419  *	@rxqi: the max number of interrupt-capable ingress queues
4420  *	@rxq: the max number of interruptless ingress queues
4421  *	@tc: the PCI traffic class
4422  *	@vi: the max number of virtual interfaces
4423  *	@cmask: the channel access rights mask for the PF/VF
4424  *	@pmask: the port access rights mask for the PF/VF
4425  *	@nexact: the maximum number of exact MPS filters
4426  *	@rcaps: read capabilities
4427  *	@wxcaps: write/execute capabilities
4428  *
4429  *	Configures resource limits and capabilities for a physical or virtual
4430  *	function.
4431  */
4432 int
4433 t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4434     unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4435     unsigned int rxqi, unsigned int rxq, unsigned int tc, unsigned int vi,
4436     unsigned int cmask, unsigned int pmask, unsigned int nexact,
4437     unsigned int rcaps, unsigned int wxcaps)
4438 {
4439 	struct fw_pfvf_cmd c;
4440 
4441 	(void) memset(&c, 0, sizeof (c));
4442 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4443 	    F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) | V_FW_PFVF_CMD_VFN(vf));
4444 	c.retval_len16 = htonl(FW_LEN16(c));
4445 	c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4446 	    V_FW_PFVF_CMD_NIQ(rxq));
4447 	c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4448 	    V_FW_PFVF_CMD_PMASK(pmask) | V_FW_PFVF_CMD_NEQ(txq));
4449 	c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4450 	    V_FW_PFVF_CMD_NEXACTF(nexact));
4451 	c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4452 	    V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4453 	    V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4454 	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4455 }
4456 
4457 /*
4458  *	t4_alloc_vi_func - allocate a virtual interface
4459  *	@adap: the adapter
4460  *	@mbox: mailbox to use for the FW command
4461  *	@port: physical port associated with the VI
4462  *	@pf: the PF owning the VI
4463  *	@vf: the VF owning the VI
4464  *	@nmac: number of MAC addresses needed (1 to 5)
4465  *	@mac: the MAC addresses of the VI
4466  *	@rss_size: size of RSS table slice associated with this VI
4467  *
4468  *	Allocates a virtual interface for the given physical port.  If @mac is
4469  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses; they are
4471  *	stored consecutively so the space needed is @nmac * 6 bytes.
4472  *	Returns a negative error number or the non-negative VI id.
4473  */
4474 int
4475 t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4476     unsigned int port, unsigned int pf, unsigned int vf,
4477     unsigned int nmac, u8 *mac, unsigned int *rss_size,
4478     unsigned int portfunc, unsigned int idstype)
4479 {
4480 	int ret;
4481 	struct fw_vi_cmd c;
4482 
4483 	(void) memset(&c, 0, sizeof (c));
4484 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4485 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4486 	    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4487 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4488 	c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
4489 	    V_FW_VI_CMD_FUNC(portfunc));
4490 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4491 	c.nmac = nmac - 1;
4492 
4493 	ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
4494 	if (ret != 0)
4495 		return (ret);
4496 
4497 	if (mac != NULL) {
4498 		(void) memcpy(mac, c.mac, sizeof (c.mac));
4499 		switch (nmac) {
4500 		case 5:
4501 			(void) memcpy(mac + 24, c.nmac3, sizeof (c.nmac3));
4502 		/* FALLTHRU */
4503 		case 4:
4504 			(void) memcpy(mac + 18, c.nmac2, sizeof (c.nmac2));
4505 		/* FALLTHRU */
4506 		case 3:
4507 			(void) memcpy(mac + 12, c.nmac1, sizeof (c.nmac1));
4508 		/* FALLTHRU */
4509 		case 2:
4510 			(void) memcpy(mac + 6,  c.nmac0, sizeof (c.nmac0));
4511 		}
4512 	}
4513 	if (rss_size != NULL)
4514 		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.norss_rsssize));
	return (G_FW_VI_CMD_VIID(ntohs(c.type_to_viid)));
4516 }
4517 
4518 /*
4519  *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4520  *	@adap: the adapter
4521  *	@mbox: mailbox to use for the FW command
4522  *	@port: physical port associated with the VI
4523  *	@pf: the PF owning the VI
4524  *	@vf: the VF owning the VI
4525  *	@nmac: number of MAC addresses needed (1 to 5)
4526  *	@mac: the MAC addresses of the VI
4527  *	@rss_size: size of RSS table slice associated with this VI
4528  *
 *	Backwards-compatible convenience routine to allocate a Virtual
 *	Interface with an Ethernet Port Application Function and Intrusion
 *	Detection System disabled.
4532  */
4533 int
4534 t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4535     unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4536     unsigned int *rss_size)
4537 {
	return (t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
	    FW_VI_FUNC_ETH, 0));
}
4541 
4542 /*
4543  *	t4_free_vi - free a virtual interface
4544  *	@adap: the adapter
4545  *	@mbox: mailbox to use for the FW command
4546  *	@pf: the PF owning the VI
4547  *	@vf: the VF owning the VI
 *	@viid: virtual interface identifier
4549  *
4550  *	Free a previously allocated virtual interface.
4551  */
4552 int
4553 t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4554     unsigned int vf, unsigned int viid)
4555 {
4556 	struct fw_vi_cmd c;
4557 
4558 	(void) memset(&c, 0, sizeof (c));
4559 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
4560 	    F_FW_CMD_REQUEST |
4561 	    F_FW_CMD_EXEC |
4562 	    V_FW_VI_CMD_PFN(pf) |
4563 	    V_FW_VI_CMD_VFN(vf));
4564 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
4565 	c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
4566 
4567 	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), &c));
4568 }
4569 
4570 /*
4571  *	t4_set_rxmode - set Rx properties of a virtual interface
4572  *	@adap: the adapter
4573  *	@mbox: mailbox to use for the FW command
4574  *	@viid: the VI id
4575  *	@mtu: the new MTU or -1
4576  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4577  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4578  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4579  *	@vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change
4580  *	@sleep_ok: if true we may sleep while awaiting command completion
4581  *
4582  *	Sets Rx properties of a virtual interface.
4583  */
4584 int
4585 t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4586     int mtu, int promisc, int all_multi, int bcast, int vlanex, bool sleep_ok)
4587 {
4588 	struct fw_vi_rxmode_cmd c;
4589 
4590 	/* convert to FW values */
4591 	if (mtu < 0)
4592 		mtu = M_FW_VI_RXMODE_CMD_MTU;
4593 	if (promisc < 0)
4594 		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4595 	if (all_multi < 0)
4596 		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4597 	if (bcast < 0)
4598 		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4599 	if (vlanex < 0)
4600 		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4601 
4602 	(void) memset(&c, 0, sizeof (c));
4603 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
4604 	    F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
4605 	c.retval_len16 = htonl(FW_LEN16(c));
4606 	c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4607 	    V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4608 	    V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4609 	    V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4610 	    V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
4611 	return (t4_wr_mbox_meat(adap, mbox, &c, sizeof (c), NULL, sleep_ok));
4612 }
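
/*
 * Usage sketch: enable promiscuous mode on a VI while leaving the MTU
 * and all other Rx properties untouched (-1 means "no change"):
 *
 *	rc = t4_set_rxmode(adap, mbox, viid, -1, 1, -1, -1, -1, true);
 */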
4613 
4614 /*
4615  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
4616  *	@adap: the adapter
4617  *	@mbox: mailbox to use for the FW command
4618  *	@viid: the VI id
4619  *	@free: if true any existing filters for this VI id are first removed
4620  *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
4621  *	@addr: the MAC address(es)
4622  *	@idx: where to store the index of each allocated filter
4623  *	@hash: pointer to hash address filter bitmap
4624  *	@sleep_ok: call is allowed to sleep
4625  *
4626  *	Allocates an exact-match filter for each of the supplied addresses and
4627  *	sets it to the corresponding address.  If @idx is not %NULL it should
4628  *	have at least @naddr entries, each of which will be set to the index of
4629  *	the filter allocated for the corresponding MAC address.  If a filter
4630  *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL, addresses that fail to allocate an exact filter
 *	are hashed and the hash filter bitmap pointed at by @hash is updated.
4633  *
4634  *	Returns a negative error number or the number of filters allocated.
4635  */
4636 int
4637 t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid,
4638     bool free, unsigned int naddr, const u8 **addr, u16 *idx, u64 *hash,
4639     bool sleep_ok)
4640 {
4641 	int offset, ret = 0;
4642 	struct fw_vi_mac_cmd c;
4643 	unsigned int nfilters = 0;
4644 	unsigned int max_naddr = is_t4(adap->params.chip) ?
4645 	    NUM_MPS_CLS_SRAM_L_INSTANCES :
4646 	    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
4647 	unsigned int rem = naddr;
4648 
4649 	if (naddr > max_naddr)
4650 		return (-EINVAL);
4651 
4652 	for (offset = 0; offset < naddr; /* */) {
4653 		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
4654 		    ? rem : ARRAY_SIZE(c.u.exact));
4655 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
4656 		    u.exact[fw_naddr]), 16);
4657 		struct fw_vi_mac_exact *p;
4658 		int i;
4659 
4660 		(void) memset(&c, 0, sizeof (c));
4661 		c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4662 		    F_FW_CMD_REQUEST |
4663 		    F_FW_CMD_WRITE |
4664 		    V_FW_CMD_EXEC(free) |
4665 		    V_FW_VI_MAC_CMD_VIID(viid));
4666 		c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
4667 		    V_FW_CMD_LEN16(len16));
4668 
4669 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4670 			p->valid_to_idx = htons(
4671 			    F_FW_VI_MAC_CMD_VALID |
4672 			    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
4673 			(void) memcpy(p->macaddr, addr[offset+i],
4674 			    sizeof (p->macaddr));
4675 		}
4676 
4677 		/*
4678 		 * It's okay if we run out of space in our MAC address arena.
4679 		 * Some of the addresses we submit may get stored so we need
4680 		 * to run through the reply to see what the results were ...
4681 		 */
4682 		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof (c), &c, sleep_ok);
4683 		if (ret && ret != -FW_ENOMEM)
4684 			break;
4685 
4686 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4687 			u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4688 
4689 			if (idx != NULL)
4690 				idx[offset+i] =
4691 				    (index >= max_naddr ?
4692 				    0xffff : index);
4693 			if (index < max_naddr)
4694 				nfilters++;
4695 			else if (hash != NULL)
4696 				*hash |=
4697 				    (1ULL << hash_mac_addr(addr[offset+i]));
4698 		}
4699 
4700 		free = false;
4701 		offset += fw_naddr;
4702 		rem -= fw_naddr;
4703 	}
4704 
4705 	if (ret == 0 || ret == -FW_ENOMEM)
4706 		ret = nfilters;
4707 	return (ret);
4708 }
4709 
4710 /*
4711  *	t4_change_mac - modifies the exact-match filter for a MAC address
4712  *	@adap: the adapter
4713  *	@mbox: mailbox to use for the FW command
4714  *	@viid: the VI id
4715  *	@idx: index of existing filter for old value of MAC address, or -1
4716  *	@addr: the new MAC address value
4717  *	@persist: whether a new MAC allocation should be persistent
4718  *	@add_smt: if true also add the address to the HW SMT
4719  *
4720  *	Modifies an exact-match filter and sets it to the new MAC address if
4721  *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
4722  *	latter case the address is added persistently if @persist is %true.
4723  *
4724  *	Note that in general it is not possible to modify the value of a given
4725  *	filter so the generic way to modify an address filter is to free the one
4726  *	being used by the old address value and allocate a new filter for the
4727  *	new address value.
4728  *
4729  *	Returns a negative error number or the index of the filter with the new
4730  *	MAC value.  Note that this index may differ from @idx.
4731  */
4732 int
4733 t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
4734     int idx, const u8 *addr, bool persist, bool add_smt)
4735 {
4736 	int ret, mode;
4737 	struct fw_vi_mac_cmd c;
4738 	struct fw_vi_mac_exact *p = c.u.exact;
4739 	unsigned int max_mac_addr = is_t4(adap->params.chip) ?
4740 	    NUM_MPS_CLS_SRAM_L_INSTANCES :
4741 	    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
4742 
4743 
4744 	if (idx < 0)				/* new allocation */
4745 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
4746 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
4747 
4748 	(void) memset(&c, 0, sizeof (c));
4749 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4750 	    F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
4751 	c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
4752 	p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
4753 	    V_FW_VI_MAC_CMD_SMAC_RESULT(mode) | V_FW_VI_MAC_CMD_IDX(idx));
4754 	(void) memcpy(p->macaddr, addr, sizeof (p->macaddr));
4755 
4756 	ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
4757 	if (ret == 0) {
4758 		ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4759 		if (ret >= max_mac_addr)
4760 			ret = -ENOMEM;
4761 	}
4762 	return (ret);
4763 }
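
/*
 * Usage sketch: program a VI's primary unicast address for the first
 * time (no existing filter, hence idx = -1) and remember the returned
 * filter index so the address can be replaced later:
 *
 *	int xact_addr_filt;
 *
 *	xact_addr_filt = t4_change_mac(adap, mbox, viid, -1, hw_addr,
 *	    true, true);
 *	if (xact_addr_filt < 0)
 *		... filter allocation failed ...
 */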
4764 
4765 /*
4766  *	t4_set_addr_hash - program the MAC inexact-match hash filter
4767  *	@adap: the adapter
4768  *	@mbox: mailbox to use for the FW command
4769  *	@viid: the VI id
4770  *	@ucast: whether the hash filter should also match unicast addresses
4771  *	@vec: the value to be written to the hash filter
4772  *	@sleep_ok: call is allowed to sleep
4773  *
4774  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
4775  */
4776 int
4777 t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
4778     bool ucast, u64 vec, bool sleep_ok)
4779 {
4780 	struct fw_vi_mac_cmd c;
4781 
4782 	(void) memset(&c, 0, sizeof (c));
4783 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
4785 	c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
4786 	    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1));
4787 	c.u.hash.hashvec = cpu_to_be64(vec);
4788 	return (t4_wr_mbox_meat(adap, mbox, &c, sizeof (c), NULL, sleep_ok));
4789 }
4790 
4791 /*
4792  *	t4_enable_vi - enable/disable a virtual interface
4793  *	@adap: the adapter
4794  *	@mbox: mailbox to use for the FW command
4795  *	@viid: the VI id
4796  *	@rx_en: 1=enable Rx, 0=disable Rx
4797  *	@tx_en: 1=enable Tx, 0=disable Tx
4798  *
4799  *	Enables/disables a virtual interface.
4800  */
4801 int
4802 t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4803     bool rx_en, bool tx_en)
4804 {
4805 	struct fw_vi_enable_cmd c;
4806 
4807 	(void) memset(&c, 0, sizeof (c));
4808 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4809 	    F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4810 	c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
4811 	    V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
4812 	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4813 }
4814 
4815 /*
4816  *	t4_identify_port - identify a VI's port by blinking its LED
4817  *	@adap: the adapter
4818  *	@mbox: mailbox to use for the FW command
4819  *	@viid: the VI id
4820  *	@nblinks: how many times to blink LED at 2.5 Hz
4821  *
4822  *	Identifies a VI's port by blinking its LED.
4823  */
4824 int
4825 t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
4826     unsigned int nblinks)
4827 {
4828 	struct fw_vi_enable_cmd c;
4829 
4830 	(void) memset(&c, 0, sizeof (c));
4831 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4832 	    F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4833 	c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
4834 	c.blinkdur = htons(nblinks);
4835 	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4836 }
4837 
4838 /*
4839  *	t4_iq_start_stop - enable/disable an ingress queue and its FLs
4840  *	@adap: the adapter
4841  *	@mbox: mailbox to use for the FW command
4842  *	@start: %true to enable the queues, %false to disable them
4843  *	@pf: the PF owning the queues
4844  *	@vf: the VF owning the queues
4845  *	@iqid: ingress queue id
4846  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
4847  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
4848  *
4849  *	Starts or stops an ingress queue and its associated FLs, if any.
4850  */
4851 int
4852 t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
4853     unsigned int pf, unsigned int vf, unsigned int iqid, unsigned int fl0id,
4854     unsigned int fl1id)
4855 {
4856 	struct fw_iq_cmd c;
4857 
4858 	(void) memset(&c, 0, sizeof (c));
4859 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4860 	    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4861 	    V_FW_IQ_CMD_VFN(vf));
4862 	c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
4863 	    V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
4864 	c.iqid = htons(iqid);
4865 	c.fl0id = htons(fl0id);
4866 	c.fl1id = htons(fl1id);
4867 	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
4868 }

/*
 *	t4_iq_free - free an ingress queue and its FLs
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queues
 *	@vf: the VF owning the queues
 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 *	@iqid: ingress queue id
 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 *	Frees an ingress queue and its associated FLs, if any.
 */
int
t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
    unsigned int vf, unsigned int iqtype, unsigned int iqid, unsigned int fl0id,
    unsigned int fl1id)
{
	struct fw_iq_cmd c;

	(void) memset(&c, 0, sizeof (c));
	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
	    V_FW_IQ_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
	c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
	c.iqid = htons(iqid);
	c.fl0id = htons(fl0id);
	c.fl1id = htons(fl1id);
	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
}
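
/*
 *	Example (illustrative sketch, not part of the driver): the usual
 *	teardown order stops a queue before freeing it.  Here an IQ with one
 *	attached free list (FL0) is stopped and then released; the
 *	T4_USAGE_EXAMPLES guard is a hypothetical macro.
 */
#ifdef T4_USAGE_EXAMPLES
static int
example_teardown_iq(struct adapter *adap, unsigned int mbox, unsigned int pf,
    unsigned int vf, unsigned int iqid, unsigned int fl0id)
{
	int rc;

	rc = t4_iq_start_stop(adap, mbox, false, pf, vf, iqid, fl0id, 0xffff);
	if (rc != 0)
		return (rc);
	return (t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP, iqid,
	    fl0id, 0xffff));
}
#endif	/* T4_USAGE_EXAMPLES */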

/*
 *	t4_eth_eq_free - free an Ethernet egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an Ethernet egress queue.
 */
int
t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;

	(void) memset(&c, 0, sizeof (c));
	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
	    V_FW_EQ_ETH_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
}

/*
 *	t4_ctrl_eq_free - free a control egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees a control egress queue.
 */
int
t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ctrl_cmd c;

	(void) memset(&c, 0, sizeof (c));
	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
	    V_FW_EQ_CTRL_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
}

/*
 *	t4_ofld_eq_free - free an offload egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an offload egress queue.
 */
int
t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	(void) memset(&c, 0, sizeof (c));
	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
	    V_FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
	return (t4_wr_mbox(adap, mbox, &c, sizeof (c), NULL));
}
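
/*
 *	Example (illustrative sketch, not part of the driver): the three
 *	egress-queue free routines above differ only in the FW command they
 *	issue, so a caller picks the one matching how the EQ was allocated.
 *	The example_eq_type labels and T4_USAGE_EXAMPLES guard are
 *	hypothetical, not driver definitions.
 */
#ifdef T4_USAGE_EXAMPLES
enum example_eq_type { EXAMPLE_EQ_ETH, EXAMPLE_EQ_CTRL, EXAMPLE_EQ_OFLD };

static int
example_free_eq(struct adapter *adap, unsigned int mbox, unsigned int pf,
    unsigned int vf, unsigned int eqid, enum example_eq_type type)
{
	switch (type) {
	case EXAMPLE_EQ_ETH:
		return (t4_eth_eq_free(adap, mbox, pf, vf, eqid));
	case EXAMPLE_EQ_CTRL:
		return (t4_ctrl_eq_free(adap, mbox, pf, vf, eqid));
	case EXAMPLE_EQ_OFLD:
		return (t4_ofld_eq_free(adap, mbox, pf, vf, eqid));
	}
	return (-EINVAL);
}
#endif	/* T4_USAGE_EXAMPLES */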

/*
 *	t4_handle_fw_rpl - process a FW reply message
 *	@adap: the adapter
 *	@rpl: start of the FW message
 *
 *	Processes a FW message, such as link state change messages.
 */
int
t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	const struct fw_port_cmd *p = (const void *)rpl;
	unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));

	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
		/* link/module state change message */
		int i;
		int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
		struct port_info *pi = NULL;
		struct link_config *lc;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		unsigned char link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
		unsigned char fc = 0;
		unsigned short speed = 0;
		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);

		if (stat & F_FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & F_FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = SPEED_100;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = SPEED_1000;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = SPEED_10000;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
			speed = SPEED_40000;

		/* Find the port that owns the Tx channel in the message. */
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->tx_chan == chan)
				break;
		}
		lc = &pi->link_cfg;
		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {		/* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			lc->supported = ntohs(p->u.info.pcap);
			t4_os_link_changed(adap, i, link_ok);
		}
		if (mod != pi->mod_type) {
			/* LINTED: E_ASSIGN_NARROW_CONV */
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, i);
		}
	} else {
		CH_WARN_RATELIMIT(adap,
		    "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
		return (-EINVAL);
	}
	return (0);
}
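
/*
 *	Example (illustrative sketch, not part of the driver):
 *	t4_handle_fw_rpl() is meant to be fed firmware messages as they
 *	arrive on the FW event queue; a response handler passes along the
 *	64-bit-aligned message body.  The T4_USAGE_EXAMPLES guard is a
 *	hypothetical macro.
 */
#ifdef T4_USAGE_EXAMPLES
static void
example_fw_msg_handler(struct adapter *adap, const __be64 *rsp)
{
	/* Errors (e.g. an unrecognized opcode) are logged by the callee. */
	(void) t4_handle_fw_rpl(adap, rsp);
}
#endif	/* T4_USAGE_EXAMPLES */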

/*
 *	get_pci_mode - determine a card's PCI mode
 *	@adapter: the adapter
 *	@p: where to store the PCI settings
 *
 *	Determines a card's PCI mode and associated parameters, such as speed
 *	and width.
 */
static void __devinit
get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	u16 val;
	u32 pcie_cap;

	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (pcie_cap != 0) {
		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}
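
/*
 *	For reference (an assumption from the PCI Express Link Status
 *	register layout, not something this file defines): the CLS field
 *	kept in p->speed reads 1, 2 or 3 for 2.5, 5 or 8 GT/s, and the NLW
 *	field kept in p->width is the negotiated lane count, so a x8 Gen2
 *	link stores speed = 2 and width = 8.
 */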

/*
 *	init_link_config - initialize a link's SW state
 *	@lc: structure holding the link state
 *	@caps: link capabilities
 *
 *	Initializes the SW state maintained for each link, including the link's
 *	capabilities and default speed/flow-control/autonegotiation settings.
 */
static void __devinit
init_link_config(struct link_config *lc, unsigned short caps)
{
	lc->supported = caps;
	lc->requested_speed = 0;
	lc->speed = 0;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & FW_PORT_CAP_ANEG) {
		lc->advertising = lc->supported & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

static int __devinit
get_flash_params(struct adapter *adapter)
{
	int ret;
	u32 info = 0;

	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &info);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return (ret);

	if ((info & 0xff) != 0x20)		/* not a Numonyx flash */
		return (-EINVAL);
	info >>= 16;				/* log2 of size */
	if (info >= 0x14 && info < 0x18)
		adapter->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		adapter->params.sf_nsec = 64;
	else
		return (-EINVAL);
	adapter->params.sf_size = 1 << info;
	return (0);
}
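
/*
 *	Worked example: a 4 MB part reports a log2 size of 0x16 (22), so
 *	sf_nsec = 1 << (22 - 16) = 64 sectors and sf_size = 1 << 22 = 4 MB,
 *	i.e. 64 KB per sector.  A 16 MB part (0x18) is special-cased to 64
 *	sectors as well, giving 256 KB sectors.
 */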

static void __devinit
set_pcie_completion_timeout(struct adapter *adapter, u8 range)
{
	u16 val;
	u32 pcie_cap;

	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (pcie_cap != 0) {
		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
		val &= 0xfff0;
		val |= range;
		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
	}
}
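
/*
 *	For reference (an assumption from the PCIe spec's Completion Timeout
 *	Ranges encoding, worth confirming against the spec): the low four
 *	bits of Device Control 2 select a range, and the 0xd passed by
 *	t4_prep_adapter() below requests Range D's 4 s - 13 s window.
 */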

/*
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *
 *	Initializes adapter SW state for the various HW modules and sets
 *	initial values for some adapter tunables.
 */
int __devinit
t4_prep_adapter(struct adapter *adapter)
{
	int ret, ver;
	uint16_t device_id;
	uint32_t pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);

	pl_rev = t4_read_reg(adapter, A_PL_REV);

	/*
	 * NOTE: adapter->params.chip can be retired once PL_REV itself
	 * carries the full adapter identity ((version << 4) | revision).
	 */

	/*
	 * Retrieve the adapter's device ID.
	 */
	t4_os_pci_read_cfg2(adapter, PCI_CONF_DEVID, &device_id);
	ver = CHELSIO_PCI_ID_VER(device_id);
	adapter->params.chip = 0;
	switch (ver) {
	case CHELSIO_T4_FPGA:
		adapter->params.chip |= CHELSIO_CHIP_FPGA;
		/* FALLTHROUGH */
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
		break;
	case CHELSIO_T5_FPGA:
		adapter->params.chip |= CHELSIO_CHIP_FPGA;
		/* FALLTHROUGH */
	case CHELSIO_T5:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		break;
	default:
		CH_ERR(adapter, "Device %d is not supported\n", device_id);
		return (-EINVAL);
	}

	/* The T4A1 chip is no longer supported. */
	if (is_t4(adapter->params.chip) && (pl_rev == 1)) {
		CH_ALERT(adapter, "T4 rev 1 chip is no longer supported\n");
		return (-EINVAL);
	}
	adapter->params.pci.vpd_cap_addr =
	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	ret = get_flash_params(adapter);
	if (ret < 0)
		return (ret);

	if (is_fpga(adapter->params.chip)) {
		/* FPGA */
		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
	} else {
		/* ASIC */
		adapter->params.cim_la_size = CIMLA_SIZE;
	}

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Request the 4 s - 13 s PCIe completion timeout range (0xd). */
	set_pcie_completion_timeout(adapter, 0xd);
	return (0);
}

int __devinit
t4_port_init(struct port_info *p, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j;
	struct fw_port_cmd c;
	unsigned int rss_size;
	adapter_t *adap = p->adapter;

	(void) memset(&c, 0, sizeof (c));

	/* Locate this port's module ID: the port_id'th set bit in portvec. */
	for (i = 0, j = -1; i <= p->port_id; i++) {
		do {
			j++;
		} while ((adap->params.portvec & (1 << j)) == 0);
	}

	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ | V_FW_PORT_CMD_PORTID(j));
	c.action_to_len16 = htonl(
	    V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
	    FW_LEN16(c));
	ret = t4_wr_mbox(adap, mbox, &c, sizeof (c), &c);
	if (ret != 0)
		return (ret);

	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
	if (ret < 0)
		return (ret);

	p->viid = (uint16_t)ret;
	p->tx_chan = (uint8_t)j;
	p->lport = (uint8_t)j;
	p->rss_size = (uint16_t)rss_size;
	t4_os_set_hw_addr(adap, p->port_id, addr);

	ret = ntohl(c.u.info.lstatus_to_modtype);
	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
	    G_FW_PORT_CMD_MDIOADDR(ret) : -1;
	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);

	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));

	return (0);
}
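
/*
 *	Example (illustrative sketch, not part of the driver): attach-time
 *	ordering.  t4_prep_adapter() must run before any port is set up, and
 *	each port_info (assumed to already carry its port_id and adapter
 *	back-pointer) is then initialized with t4_port_init().  The
 *	T4_USAGE_EXAMPLES guard is a hypothetical macro.
 */
#ifdef T4_USAGE_EXAMPLES
static int
example_attach(struct adapter *adap, int mbox, int pf, int vf)
{
	int i, rc;

	rc = t4_prep_adapter(adap);
	if (rc != 0)
		return (rc);
	for_each_port(adap, i) {
		rc = t4_port_init(adap2pinfo(adap, i), mbox, pf, vf);
		if (rc != 0)
			return (rc);
	}
	return (0);
}
#endif	/* T4_USAGE_EXAMPLES */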