xref: /freebsd/sys/dev/cxgbe/common/t4_hw.c (revision aa0a1e58)
/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "t4fw_interface.h"


/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
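
/*
 * Illustrative usage (editorial sketch, not part of the original source):
 * polling a hypothetical completion flag with t4_wait_op_done_val().  The
 * names A_EXAMPLE_CTRL and F_EXAMPLE_DONE are made up for illustration:
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = t4_wait_op_done_val(adap, A_EXAMPLE_CTRL, F_EXAMPLE_DONE,
 *				  1, 100, 5, &val);
 *	if (ret == -EAGAIN)
 *		CH_ERR(adap, "operation timed out\n");
 *
 * This polls up to 100 times with 5us between polls, i.e. a worst case of
 * roughly 500us before giving up.
 */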

/**
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);      /* flush */
}
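
/*
 * Illustrative usage (editorial sketch): a read-modify-write of a
 * multi-bit field.  Assuming a hypothetical field FOO with the usual
 * M_FOO/V_FOO mask and value macros, setting FOO to 3 while leaving all
 * other bits intact would be:
 *
 *	t4_set_reg_field(adap, A_EXAMPLE_REG, V_FOO(M_FOO), V_FOO(3));
 *
 * The mask selects the bits to clear before @val is OR'ed in, so bits
 * outside the field are preserved.
 */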

/**
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t4_write_indirect - write indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect addresses
 *	@data_reg: register holding the value for the indirect registers
 *	@vals: values to write
 *	@nregs: how many indirect registers to write
 *	@start_idx: address of first indirect register to write
 *
 *	Writes a sequential block of registers that are accessed indirectly
 *	through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
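
/*
 * Illustrative usage (editorial sketch): reading four consecutive
 * indirect registers through a hypothetical address/data pair
 * A_EXAMPLE_ADDR/A_EXAMPLE_DATA, starting at index 0:
 *
 *	u32 vals[4];
 *
 *	t4_read_indirect(adap, A_EXAMPLE_ADDR, A_EXAMPLE_DATA, vals,
 *			 ARRAY_SIZE(vals), 0);
 *
 * Note that neither routine serializes access to the address/data pair;
 * callers must provide any locking needed.
 */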

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

#define X_CIM_PF_NOACCESS 0xeeeeeeee
/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  Some FW commands like RESET and
 *	INITIALIZE can take a considerable amount of time to execute.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays up to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;

	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));

	if (v != X_MBOWNER_PL)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);
	return -ETIMEDOUT;
}
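
/*
 * Illustrative usage (editorial sketch): callers normally reach this
 * routine through the driver's t4_wr_mbox() wrapper.  A typical exchange
 * builds a command structure, sends it, and reuses the same structure
 * for the reply, since command and reply are the same length:
 *
 *	struct fw_caps_config_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
 *			      F_FW_CMD_REQUEST | F_FW_CMD_READ);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 *	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 *
 * (Field names follow the t4fw_interface.h of this era; treat the snippet
 * as a sketch.)  A negative return is either a local error such as
 * -EBUSY/-ETIMEDOUT or the FW's own error code, negated.
 */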

/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	if (t4_read_reg(adap, A_MC_BIST_CMD) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, A_MC_BIST_CMD_ADDR, addr & ~0x3fU);
	t4_write_reg(adap, A_MC_BIST_CMD_LEN, 64);
	t4_write_reg(adap, A_MC_BIST_DATA_PATTERN, 0xc);
	t4_write_reg(adap, A_MC_BIST_CMD, V_BIST_OPCODE(1) | F_START_BIST |
		     V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(A_MC_BIST_STATUS_RDATA, i)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	idx *= EDC_STRIDE;
	if (t4_read_reg(adap, A_EDC_BIST_CMD + idx) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, A_EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
	t4_write_reg(adap, A_EDC_BIST_CMD_LEN + idx, 64);
	t4_write_reg(adap, A_EDC_BIST_DATA_PATTERN + idx, 0xc);
	t4_write_reg(adap, A_EDC_BIST_CMD + idx,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, A_EDC_BIST_CMD + idx, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(A_EDC_BIST_STATUS_RDATA, i) + idx)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
 *	@adap: the adapter
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to read
 *	@buf: host memory buffer
 *
 *	Reads an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address, length and host buffer must be aligned on
 *	32-bit boundaries.  The memory is returned as a raw byte sequence from
 *	the firmware's memory.  If this memory contains data structures which
 *	contain multi-byte integers, it's the caller's responsibility to
 *	perform appropriate byte order conversions.
 */
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
		__be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	/*
	 * The underlying EDC/MC read routines read 64 bytes at a time so we
	 * need to round down the start and round up the end.  We'll start
	 * copying out of the first line at (addr - start) a word at a time.
	 */
	start = addr & ~(64-1);
	end = (addr + len + 64-1) & ~(64-1);
	offset = (addr - start)/sizeof(__be32);

	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 data[16];

		/*
		 * Read the chip's memory block and bail if there's an error.
		 */
		if (mtype == MEM_MC)
			ret = t4_mc_read(adap, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, data, NULL);
		if (ret)
			return ret;

		/*
		 * Copy the data into the caller's memory buffer.
		 */
		while (offset < 16 && len > 0) {
			*buf++ = data[offset++];
			len -= sizeof(__be32);
		}
	}

	return 0;
}
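
/*
 * Illustrative usage (editorial sketch): reading 128 bytes from EDC0.
 * @addr and @len only need 32-bit alignment; the 64-byte rounding is
 * handled internally:
 *
 *	__be32 buf[32];
 *	int ret;
 *
 *	ret = t4_mem_read(adap, MEM_EDC0, addr, sizeof(buf), buf);
 *
 * The buffer is filled with raw big-endian bytes; convert any embedded
 * multi-byte integers with ntohl() etc. as appropriate.
 */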

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL 40
#define EEPROM_MAX_WR_POLL 6
#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            512
#define VPD_INFO_FLD_HDR_SIZE	3

/**
 *	t4_seeprom_read - read a serial EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_RD_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	do {
		udelay(10);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 *	t4_seeprom_write - write a serial EEPROM location
 *	@adapter: adapter to write
 *	@addr: virtual EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_WR_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
			     cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
			     (u16)addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [ES-A..ES)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return EEPROMSIZE - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
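
/*
 * Worked example of the mapping above (editorial, with illustrative
 * values): suppose ES = 16K, @fn = 2 and @sz = 1K, so A = 2K.  Then:
 *
 *	phys 0x000 -> virt 0x7c00	(first 1K maps to [31K..32K))
 *	phys 0x400 -> virt 0x3800	(start of [ES-A..ES) = 14K)
 *	phys 0xc00 -> virt 0x0000	(0xc00 - 1K - A = 0)
 */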

/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, int enable)
{
	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/**
 *	get_vpd_keyword_val - locates an information field keyword in the VPD
 *	@v: pointer to buffered VPD data structure
 *	@kw: the keyword to search for
 *
 *	Returns the offset of the information field keyword's value within
 *	the VPD buffer, or -ENOENT if the keyword is not found.
 */
static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int i;
	unsigned int offset, len;
	const u8 *buf = &v->id_tag;
	const u8 *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -ENOENT;

	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
	}

	return -ENOENT;
}


/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret, addr;
	int ec, sn;
	u8 vpd[VPD_LEN], csum;
	const struct t4_vpd_hdr *v;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		return ret;
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
		if (ret)
			return ret;
	}
	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}
	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);

	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_START_SEC = 8,             /* first flash sector for FW */
	FW_END_SEC = 15,              /* last flash sector for FW */
	FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
	FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,

	FLASH_CFG_MAX_SIZE    = 0x10000,  /* max size of the flash config file */
	FLASH_CFG_OFFSET      = 0x1f0000,
	FLASH_CFG_START_SEC   = FLASH_CFG_OFFSET / SF_SEC_SIZE,
	FPGA_FLASH_CFG_OFFSET = 0xf0000,  /* if FPGA mode, the config file is at 1MB - 64KB */
	FPGA_FLASH_CFG_START_SEC  = FPGA_FLASH_CFG_OFFSET / SF_SEC_SIZE,
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
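
/*
 * Illustrative usage (editorial sketch): a single-word read in host byte
 * order, which is essentially what t4_get_fw_version() below does:
 *
 *	u32 vers;
 *	int ret;
 *
 *	ret = t4_read_flash(adap, FW_IMG_START +
 *			    offsetof(struct fw_hdr, fw_ver), 1, &vers, 0);
 *
 * Pass byte_oriented == 1 instead when the result will be treated as a
 * byte stream, e.g. when comparing against a host buffer as
 * t4_write_flash() does during its verification read-back.
 */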

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter, "failed to correctly write the flash page "
		       "at %#x\n", addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter,
			     FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 *	t4_get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr,
							      tp_microcode_ver),
			     1, vers, 0);
}

/**
 *	t4_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if there's an exact match, a negative error if the version could not
 *	be read or there's a major version mismatch, and a positive value if
 *	the expected major version is found but there's a minor version
 *	mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	u32 api_vers[2];
	int ret, major, minor, micro;

	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
	if (!ret)
		ret = t4_read_flash(adapter,
			FW_IMG_START + offsetof(struct fw_hdr, intfver_nic),
			2, api_vers, 1);
	if (ret)
		return ret;

	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);
	memcpy(adapter->params.api_vers, api_vers,
	       sizeof(adapter->params.api_vers));

	if (major != FW_VERSION_MAJOR) {            /* major mismatch - fail */
		CH_ERR(adapter, "card FW has major version %u, driver wants "
		       "%u\n", major, FW_VERSION_MAJOR);
		return -EINVAL;
	}

	if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
		return 0;                                   /* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}
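
/*
 * Illustrative usage (editorial sketch): attach-time handling of the
 * three possible outcomes.  CH_WARN is assumed to be available alongside
 * CH_ERR:
 *
 *	ret = t4_check_fw_version(adap);
 *	if (ret < 0)
 *		return ret;
 *	if (ret > 0)
 *		CH_WARN(adap, "FW minor/micro version mismatch\n");
 *
 * A negative value (unreadable version or major mismatch) should fail
 * initialization; a positive value is usually tolerable; zero is an
 * exact match.
 */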

/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter, "erase of flash sector %d failed, "
			       "error %d\n", start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_load_cfg - download config file
 *	@adap: the adapter
 *	@cfg_data: the cfg text file to write
 *	@size: text file size
 *
 *	Write the supplied config text file to the card's serial flash.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	if (adap->params.sf_size == 0x100000) {
		addr = FPGA_FLASH_CFG_OFFSET;
		flash_cfg_start_sec = FPGA_FLASH_CFG_START_SEC;
	} else {
		addr = FLASH_CFG_OFFSET;
		flash_cfg_start_sec = FLASH_CFG_START_SEC;
	}
	if (!size) {
		CH_ERR(adap, "cfg file has no data\n");
		return -EINVAL;
	}

	if (size > FLASH_CFG_MAX_SIZE) {
		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
		       FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	if (ret)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "config file download failed %d\n", ret);
	return ret;
}


/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		CH_ERR(adap, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
		       FW_MAX_SIZE);
		return -EFBIG;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
		       csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = FW_IMG_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     FW_IMG_START + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n", ret);
	return ret;
}

/**
 *	t4_read_cimq_cfg - read CIM queue configuration
 *	@adap: the adapter
 *	@base: holds the queue base addresses in bytes
 *	@size: holds the queue sizes in bytes
 *	@thres: holds the queue full thresholds in bytes
 *
 *	Returns the current configuration of the CIM queues, starting with
 *	the IBQs, then the OBQs.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
		*thres++ = G_QUEFULLTHRSH(v) * 8;   /* 8-byte unit */
	}
	for (i = 0; i < CIM_NUM_OBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
	}
}

/**
 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}
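
/*
 * Illustrative usage (editorial sketch): dumping IBQ 0 in full.  Each of
 * the six IBQs holds CIM_IBQ_SIZE * 4 words, so a buffer of that size
 * captures the whole queue:
 *
 *	u32 buf[CIM_IBQ_SIZE * 4];
 *	int n;
 *
 *	n = t4_read_cim_ibq(adap, 0, buf, ARRAY_SIZE(buf));
 *
 * On success n is the number of 32-bit words actually read; on failure
 * it is a negative errno.
 */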

/**
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;

	if (qid > 5 || (n & 3))
		return -EINVAL;

	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}

enum {
	CIM_QCTL_BASE     = 0,
	CIM_CTL_BASE      = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};

/**
 *	t4_cim_read - read a block from CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM internal address space.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 *	t4_cim_write - write a block into CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to write
 *	@valp: set of values to write
 *
 *	Writes a block of 4-byte words into the CIM internal address space.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
	}
	return ret;
}
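
/*
 * Illustrative usage (editorial sketch): the enum above partitions the
 * CIM address space, so reading a word from the control region is just a
 * read at CIM_CTL_BASE plus an offset (the t4_cim_ctl_read() helper
 * below wraps exactly this):
 *
 *	unsigned int val;
 *	int ret;
 *
 *	ret = t4_cim_read(adap, CIM_CTL_BASE + 0x0, 1, &val);
 *
 * These accesses poll F_HOSTBUSY for every word, so they are intended
 * for diagnostics rather than fast paths.
 */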

static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}

/**
 *	t4_cim_ctl_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
		    unsigned int *valp)
{
	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
}

/**
 *	t4_cim_read_la - read CIM LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the CIM LA buffer with the most recent entry at
 *	the end of the returned data and with the entry at @wrptr first.
 *	We try to leave the LA in the running state we find it in.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {                /* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;
		idx = (idx + 1) & M_UPDBGLARDPTR;
	}
restart:
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}

void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			idx = 8 * i + j;
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
				     V_PILADBGRDPTR(idx));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

/**
 *	t4_tp_read_la - read TP LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the TP LA buffer with the most recent entry at
 *	the end of the returned data and with the entry at @wrptr first.
 *	We leave the LA in the running state we find it in.
 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)                    /* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;
	if (wrptr)
		*wrptr = idx;

	val &= 0xffff;
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)                    /* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     cfg | adap->params.tp.la_mask);
}

void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
	}
}

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)

/**
 *	t4_link_start - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_restart_aneg - restart autonegotiation
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *
 *	Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
};

/**
 *	t4_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@acts: table of interrupt actions
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally emitting a warning or alert message.  The table is terminated
 *	by an entry specifying mask 0.  Returns the number of fatal interrupt
 *	conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
					  acts->msg, status & acts->mask);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)                           /* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static struct intr_info sysbus_intr_info[] = {
		{ F_RNPP, "RXNP array parity error", -1, 1 },
		{ F_RPCP, "RXPC array parity error", -1, 1 },
		{ F_RCIP, "RXCIF array parity error", -1, 1 },
		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
		{ F_RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info pcie_port_intr_info[] = {
		{ F_TPCP, "TXPC array parity error", -1, 1 },
		{ F_TNPP, "TXNP array parity error", -1, 1 },
		{ F_TFTP, "TXFT array parity error", -1, 1 },
		{ F_TCAP, "TXCA array parity error", -1, 1 },
		{ F_TCIP, "TXCIF array parity error", -1, 1 },
		{ F_RCAP, "RXCA array parity error", -1, 1 },
		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
		{ F_RDPE, "Rx data parity error", -1, 1 },
		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	static struct intr_info pcie_intr_info[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
		  0 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter,
				    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				    sysbus_intr_info) +
	      t4_handle_intr_status(adapter,
				    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
				    pcie_port_intr_info) +
	      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;
	u32 err;

	static struct intr_info sge_intr_info[] = {
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ F_ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ F_ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ F_ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
	if (v) {
		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
			 (unsigned long long)v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
	}

	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);

	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
	if (err & F_ERROR_QID_VALID) {
		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID);
	}

	if (v != 0)
		t4_fatal_err(adapter);
}
1675 
1676 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
1677 		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
1678 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
1679 		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
1680 
1681 /*
1682  * CIM interrupt handler.
1683  */
1684 static void cim_intr_handler(struct adapter *adapter)
1685 {
1686 	static struct intr_info cim_intr_info[] = {
1687 		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1688 		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
1689 		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
1690 		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1691 		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1692 		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1693 		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1694 		{ 0 }
1695 	};
1696 	static struct intr_info cim_upintr_info[] = {
1697 		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1698 		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1699 		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
1700 		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
1701 		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1702 		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1703 		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1704 		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1705 		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1706 		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1707 		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1708 		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1709 		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1710 		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1711 		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1712 		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1713 		{ F_SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
1714 		{ F_SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
1715 		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
1716 		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
1717 		{ F_SGLRDPLINT, "CIM single read from PL space", -1, 1 },
1718 		{ F_SGLWRPLINT, "CIM single write to PL space", -1, 1 },
1719 		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
1720 		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
1721 		{ F_REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
1722 		{ F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
1723 		{ F_TIMEOUTINT, "CIM PIF timeout", -1, 1 },
1724 		{ F_TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
1725 		{ 0 }
1726 	};
1727 
1728 	int fat;
1729 
1730 	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
1731 				    cim_intr_info) +
1732 	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
1733 				    cim_upintr_info);
1734 	if (fat)
1735 		t4_fatal_err(adapter);
1736 }
1737 
1738 /*
1739  * ULP RX interrupt handler.
1740  */
1741 static void ulprx_intr_handler(struct adapter *adapter)
1742 {
1743 	static struct intr_info ulprx_intr_info[] = {
1744 		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
1745 		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
1746 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
1747 		{ 0 }
1748 	};
1749 
1750 	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
1751 		t4_fatal_err(adapter);
1752 }
1753 
1754 /*
1755  * ULP TX interrupt handler.
1756  */
1757 static void ulptx_intr_handler(struct adapter *adapter)
1758 {
1759 	static struct intr_info ulptx_intr_info[] = {
1760 		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1761 		  0 },
1762 		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1763 		  0 },
1764 		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1765 		  0 },
1766 		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1767 		  0 },
1768 		{ 0xfffffff, "ULPTX parity error", -1, 1 },
1769 		{ 0 }
1770 	};
1771 
1772 	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
1773 		t4_fatal_err(adapter);
1774 }
1775 
1776 /*
1777  * PM TX interrupt handler.
1778  */
1779 static void pmtx_intr_handler(struct adapter *adapter)
1780 {
1781 	static struct intr_info pmtx_intr_info[] = {
1782 		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1783 		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1784 		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1785 		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1786 		{ 0xffffff0, "PMTX framing error", -1, 1 },
1787 		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1788 		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
1789 		  1 },
1790 		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1791 		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1 },
1792 		{ 0 }
1793 	};
1794 
1795 	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
1796 		t4_fatal_err(adapter);
1797 }
1798 
1799 /*
1800  * PM RX interrupt handler.
1801  */
1802 static void pmrx_intr_handler(struct adapter *adapter)
1803 {
1804 	static struct intr_info pmrx_intr_info[] = {
1805 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1806 		{ 0x3ffff0, "PMRX framing error", -1, 1 },
1807 		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1808 		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
1809 		  1 },
1810 		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1811 		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1 },
1812 		{ 0 }
1813 	};
1814 
1815 	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
1816 		t4_fatal_err(adapter);
1817 }
1818 
1819 /*
1820  * CPL switch interrupt handler.
1821  */
1822 static void cplsw_intr_handler(struct adapter *adapter)
1823 {
1824 	static struct intr_info cplsw_intr_info[] = {
1825 		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1826 		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1827 		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1828 		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1829 		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1830 		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1831 		{ 0 }
1832 	};
1833 
1834 	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
1835 		t4_fatal_err(adapter);
1836 }
1837 
1838 /*
1839  * LE interrupt handler.
1840  */
1841 static void le_intr_handler(struct adapter *adap)
1842 {
1843 	static struct intr_info le_intr_info[] = {
1844 		{ F_LIPMISS, "LE LIP miss", -1, 0 },
1845 		{ F_LIP0, "LE 0 LIP error", -1, 0 },
1846 		{ F_PARITYERR, "LE parity error", -1, 1 },
1847 		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
1848 		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
1849 		{ 0 }
1850 	};
1851 
1852 	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
1853 		t4_fatal_err(adap);
1854 }
1855 
1856 /*
1857  * MPS interrupt handler.
1858  */
1859 static void mps_intr_handler(struct adapter *adapter)
1860 {
1861 	static struct intr_info mps_rx_intr_info[] = {
1862 		{ 0xffffff, "MPS Rx parity error", -1, 1 },
1863 		{ 0 }
1864 	};
1865 	static struct intr_info mps_tx_intr_info[] = {
1866 		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
1867 		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1868 		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
1869 		  -1, 1 },
1870 		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
1871 		  -1, 1 },
1872 		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
1873 		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1874 		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
1875 		{ 0 }
1876 	};
1877 	static struct intr_info mps_trc_intr_info[] = {
1878 		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
1879 		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
1880 		  1 },
1881 		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
1882 		{ 0 }
1883 	};
1884 	static struct intr_info mps_stat_sram_intr_info[] = {
1885 		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1886 		{ 0 }
1887 	};
1888 	static struct intr_info mps_stat_tx_intr_info[] = {
1889 		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1890 		{ 0 }
1891 	};
1892 	static struct intr_info mps_stat_rx_intr_info[] = {
1893 		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1894 		{ 0 }
1895 	};
1896 	static struct intr_info mps_cls_intr_info[] = {
1897 		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1898 		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1899 		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1900 		{ 0 }
1901 	};
1902 
1903 	int fat;
1904 
1905 	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
1906 				    mps_rx_intr_info) +
1907 	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
1908 				    mps_tx_intr_info) +
1909 	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
1910 				    mps_trc_intr_info) +
1911 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
1912 				    mps_stat_sram_intr_info) +
1913 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1914 				    mps_stat_tx_intr_info) +
1915 	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1916 				    mps_stat_rx_intr_info) +
1917 	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
1918 				    mps_cls_intr_info);
1919 
1920 	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
1921 	t4_read_reg(adapter, A_MPS_INT_CAUSE);                    /* flush */
1922 	if (fat)
1923 		t4_fatal_err(adapter);
1924 }
1925 
1926 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
1927 
1928 /*
1929  * EDC/MC interrupt handler.
1930  */
1931 static void mem_intr_handler(struct adapter *adapter, int idx)
1932 {
1933 	static const char name[3][5] = { "EDC0", "EDC1", "MC" };
1934 
1935 	unsigned int addr, cnt_addr, v;
1936 
1937 	if (idx <= MEM_EDC1) {
1938 		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
1939 		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
1940 	} else {
1941 		addr = A_MC_INT_CAUSE;
1942 		cnt_addr = A_MC_ECC_STATUS;
1943 	}
1944 
1945 	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1946 	if (v & F_PERR_INT_CAUSE)
1947 		CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
1948 	if (v & F_ECC_CE_INT_CAUSE) {
1949 		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
1950 
1951 		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
1952 		CH_WARN_RATELIMIT(adapter,
1953 				  "%u %s correctable ECC data error%s\n",
1954 				  cnt, name[idx], cnt > 1 ? "s" : "");
1955 	}
1956 	if (v & F_ECC_UE_INT_CAUSE)
1957 		CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
1958 			 name[idx]);
1959 
1960 	t4_write_reg(adapter, addr, v);
1961 	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
1962 		t4_fatal_err(adapter);
1963 }
1964 
1965 /*
1966  * MA interrupt handler.
1967  */
1968 static void ma_intr_handler(struct adapter *adapter)
1969 {
1970 	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
1971 
1972 	if (status & F_MEM_PERR_INT_CAUSE)
1973 		CH_ALERT(adapter, "MA parity error, parity status %#x\n",
1974 			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
1975 	if (status & F_MEM_WRAP_INT_CAUSE) {
1976 		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
1977 		CH_ALERT(adapter, "MA address wrap-around error by client %u to"
1978 			 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
1979 			 G_MEM_WRAP_ADDRESS(v) << 4);
1980 	}
1981 	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
1982 	t4_fatal_err(adapter);
1983 }
1984 
1985 /*
1986  * SMB interrupt handler.
1987  */
1988 static void smb_intr_handler(struct adapter *adap)
1989 {
1990 	static struct intr_info smb_intr_info[] = {
1991 		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1992 		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1993 		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1994 		{ 0 }
1995 	};
1996 
1997 	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
1998 		t4_fatal_err(adap);
1999 }
2000 
2001 /*
2002  * NC-SI interrupt handler.
2003  */
2004 static void ncsi_intr_handler(struct adapter *adap)
2005 {
2006 	static struct intr_info ncsi_intr_info[] = {
2007 		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2008 		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2009 		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2010 		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2011 		{ 0 }
2012 	};
2013 
2014 	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
2015 		t4_fatal_err(adap);
2016 }
2017 
2018 /*
2019  * XGMAC interrupt handler.
2020  */
2021 static void xgmac_intr_handler(struct adapter *adap, int port)
2022 {
2023 	u32 v = t4_read_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE));
2024 
2025 	v &= F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
2026 	if (!v)
2027 		return;
2028 
2029 	if (v & F_TXFIFO_PRTY_ERR)
2030 		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2031 	if (v & F_RXFIFO_PRTY_ERR)
2032 		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
2033 	t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE), v);
2034 	t4_fatal_err(adap);
2035 }
2036 
2037 /*
2038  * PL interrupt handler.
2039  */
2040 static void pl_intr_handler(struct adapter *adap)
2041 {
2042 	static struct intr_info pl_intr_info[] = {
2043 		{ F_FATALPERR, "T4 fatal parity error", -1, 1 },
2044 		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2045 		{ 0 }
2046 	};
2047 
2048 	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, pl_intr_info))
2049 		t4_fatal_err(adap);
2050 }
2051 
2052 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2053 #define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2054 		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2055 		F_CPL_SWITCH | F_SGE | F_ULP_TX)
2056 
2057 /**
2058  *	t4_slow_intr_handler - control path interrupt handler
2059  *	@adapter: the adapter
2060  *
2061  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
2062  *	The designation 'slow' is because it involves register reads, while
2063  *	data interrupts typically don't involve any MMIOs.
2064  */
2065 int t4_slow_intr_handler(struct adapter *adapter)
2066 {
2067 	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
2068 
2069 	if (!(cause & GLBL_INTR_MASK))
2070 		return 0;
2071 	if (cause & F_CIM)
2072 		cim_intr_handler(adapter);
2073 	if (cause & F_MPS)
2074 		mps_intr_handler(adapter);
2075 	if (cause & F_NCSI)
2076 		ncsi_intr_handler(adapter);
2077 	if (cause & F_PL)
2078 		pl_intr_handler(adapter);
2079 	if (cause & F_SMB)
2080 		smb_intr_handler(adapter);
2081 	if (cause & F_XGMAC0)
2082 		xgmac_intr_handler(adapter, 0);
2083 	if (cause & F_XGMAC1)
2084 		xgmac_intr_handler(adapter, 1);
2085 	if (cause & F_XGMAC_KR0)
2086 		xgmac_intr_handler(adapter, 2);
2087 	if (cause & F_XGMAC_KR1)
2088 		xgmac_intr_handler(adapter, 3);
2089 	if (cause & F_PCIE)
2090 		pcie_intr_handler(adapter);
2091 	if (cause & F_MC)
2092 		mem_intr_handler(adapter, MEM_MC);
2093 	if (cause & F_EDC0)
2094 		mem_intr_handler(adapter, MEM_EDC0);
2095 	if (cause & F_EDC1)
2096 		mem_intr_handler(adapter, MEM_EDC1);
2097 	if (cause & F_LE)
2098 		le_intr_handler(adapter);
2099 	if (cause & F_TP)
2100 		tp_intr_handler(adapter);
2101 	if (cause & F_MA)
2102 		ma_intr_handler(adapter);
2103 	if (cause & F_PM_TX)
2104 		pmtx_intr_handler(adapter);
2105 	if (cause & F_PM_RX)
2106 		pmrx_intr_handler(adapter);
2107 	if (cause & F_ULP_RX)
2108 		ulprx_intr_handler(adapter);
2109 	if (cause & F_CPL_SWITCH)
2110 		cplsw_intr_handler(adapter);
2111 	if (cause & F_SGE)
2112 		sge_intr_handler(adapter);
2113 	if (cause & F_ULP_TX)
2114 		ulptx_intr_handler(adapter);
2115 
2116 	/* Clear the interrupts just processed for which we are the master. */
2117 	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2118 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
2119 	return 1;
2120 }
2121 
2122 /**
2123  *	t4_intr_enable - enable interrupts
2124  *	@adapter: the adapter whose interrupts should be enabled
2125  *
2126  *	Enable PF-specific interrupts for the calling function and the top-level
2127  *	interrupt concentrator for global interrupts.  Interrupts are already
2128  *	enabled at each module, here we just enable the roots of the interrupt
2129  *	hierarchies.
2130  *
2131  *	Note: this function should be called only when the driver manages
2132  *	non PF-specific interrupts from the various HW modules.  Only one PCI
2133  *	function at a time should be doing this.
2134  */
2135 void t4_intr_enable(struct adapter *adapter)
2136 {
2137 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2138 
2139 	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2140 		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2141 		     F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
2142 		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2143 		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2144 		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2145 		     F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
2146 		     F_EGRESS_SIZE_ERR);
2147 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2148 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2149 }
2150 
2151 /**
2152  *	t4_intr_disable - disable interrupts
2153  *	@adapter: the adapter whose interrupts should be disabled
2154  *
2155  *	Disable interrupts.  We only disable the top-level interrupt
2156  *	concentrators.  The caller must be a PCI function managing global
2157  *	interrupts.
2158  */
2159 void t4_intr_disable(struct adapter *adapter)
2160 {
2161 	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2162 
2163 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2164 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2165 }
2166 
2167 /**
2168  *	t4_intr_clear - clear all interrupts
2169  *	@adapter: the adapter whose interrupts should be cleared
2170  *
2171  *	Clears all interrupts.  The caller must be a PCI function managing
2172  *	global interrupts.
2173  */
2174 void t4_intr_clear(struct adapter *adapter)
2175 {
2176 	static const unsigned int cause_reg[] = {
2177 		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
2178 		A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
2179 		A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
2180 		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
2181 		A_MC_INT_CAUSE,
2182 		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
2183 		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
2184 		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
2185 		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
2186 		A_TP_INT_CAUSE,
2187 		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
2188 		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
2189 		A_MPS_RX_PERR_INT_CAUSE,
2190 		A_CPL_INTR_CAUSE,
2191 		MYPF_REG(A_PL_PF_INT_CAUSE),
2192 		A_PL_PL_INT_CAUSE,
2193 		A_LE_DB_INT_CAUSE,
2194 	};
2195 
2196 	unsigned int i;
2197 
2198 	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
2199 		t4_write_reg(adapter, cause_reg[i], 0xffffffff);
2200 
2201 	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
2202 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
2203 }
2204 
2205 /**
2206  *	hash_mac_addr - return the hash value of a MAC address
2207  *	@addr: the 48-bit Ethernet MAC address
2208  *
2209  *	Hashes a MAC address according to the hash function used by HW inexact
2210  *	(hash) address matching.
2211  */
2212 static int hash_mac_addr(const u8 *addr)
2213 {
2214 	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2215 	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2216 	a ^= b;
2217 	a ^= (a >> 12);
2218 	a ^= (a >> 6);
2219 	return a & 0x3f;
2220 }
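
/*
 * Worked example (illustrative only): for the address 00:07:43:00:00:00
 * the two 24-bit halves are a = 0x000743 and b = 0x000000, so
 *
 *	a ^= b;			// a = 0x743
 *	a ^= (a >> 12);		// a = 0x743 (the high bits are zero)
 *	a ^= (a >> 6);		// a = 0x75e
 *	a &= 0x3f;		// hash = 0x1e (30)
 *
 * The 6-bit result is typically used as a bit index into the 64-bit
 * inexact-match hash filter, i.e. 1ULL << hash_mac_addr(addr).
 */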
2221 
2222 /**
2223  *	t4_config_rss_range - configure a portion of the RSS mapping table
2224  *	@adapter: the adapter
2225  *	@mbox: mbox to use for the FW command
2226  *	@viid: virtual interface whose RSS subtable is to be written
2227  *	@start: start entry in the table to write
2228  *	@n: how many table entries to write
2229  *	@rspq: values for the "response queue" (Ingress Queue) lookup table
2230  *	@nrspq: number of values in @rspq
2231  *
2232  *	Programs the selected part of the VI's RSS mapping table with the
2233  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
2234  *	until the full table range is populated.
2235  *
2236  *	The caller must ensure the values in @rspq are in the range allowed for
2237  *	@viid.
2238  */
2239 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2240 			int start, int n, const u16 *rspq, unsigned int nrspq)
2241 {
2242 	int ret;
2243 	const u16 *rsp = rspq;
2244 	const u16 *rsp_end = rspq + nrspq;
2245 	struct fw_rss_ind_tbl_cmd cmd;
2246 
2247 	memset(&cmd, 0, sizeof(cmd));
2248 	cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2249 			       F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2250 			       V_FW_RSS_IND_TBL_CMD_VIID(viid));
2251 	cmd.retval_len16 = htonl(FW_LEN16(cmd));
2252 
2254 	/*
2255 	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2256 	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
2257 	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2258 	 * reserved.
2259 	 */
2260 	while (n > 0) {
2261 		int nq = min(n, 32);
2262 		__be32 *qp = &cmd.iq0_to_iq2;
2263 
2264 		/*
2265 		 * Set up the firmware RSS command header to send the next
2266 		 * "nq" Ingress Queue IDs to the firmware.
2267 		 */
2268 		cmd.niqid = htons(nq);
2269 		cmd.startidx = htons(start);
2270 
2271 		/*
2272 		 * Advance "start" and "n" past the "nq" IDs sent in this command.
2273 		 */
2274 		start += nq;
2275 		n -= nq;
2276 
2277 		/*
2278 		 * While there are still Ingress Queue IDs to stuff into the
2279 		 * current firmware RSS command, retrieve them from the
2280 		 * Ingress Queue ID array and insert them into the command.
2281 		 */
2282 		while (nq > 0) {
2283 			unsigned int v;
2284 			/*
2285 			 * Grab up to the next 3 Ingress Queue IDs (wrapping
2286 			 * around the Ingress Queue ID array if necessary) and
2287 			 * insert them into the firmware RSS command at the
2288 			 * current 3-tuple position within the command.
2289 			 */
2290 			v = V_FW_RSS_IND_TBL_CMD_IQ0(*rsp);
2291 			if (++rsp >= rsp_end)
2292 				rsp = rspq;
2293 			v |= V_FW_RSS_IND_TBL_CMD_IQ1(*rsp);
2294 			if (++rsp >= rsp_end)
2295 				rsp = rspq;
2296 			v |= V_FW_RSS_IND_TBL_CMD_IQ2(*rsp);
2297 			if (++rsp >= rsp_end)
2298 				rsp = rspq;
2299 
2300 			*qp++ = htonl(v);
2301 			nq -= 3;
2302 		}
2303 
2304 		/*
2305 		 * Send this portion of the RSS table update to the firmware;
2306 		 * bail out on any errors.
2307 		 */
2308 		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2309 		if (ret)
2310 			return ret;
2311 	}
2312 
2313 	return 0;
2314 }
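
/*
 * Usage sketch (not driver code): to spread a VI's traffic round-robin
 * across four ingress queues, a caller could fill the VI's whole RSS
 * slice (say 128 entries) from a 4-entry array; because nrspq < n the
 * four IDs simply repeat until the range is full:
 *
 *	u16 rspq[4] = { iq0, iq1, iq2, iq3 };	// hypothetical IQ IDs
 *	int err;
 *
 *	err = t4_config_rss_range(adap, mbox, viid, 0, 128, rspq, 4);
 */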
2315 
2316 /**
2317  *	t4_config_glbl_rss - configure the global RSS mode
2318  *	@adapter: the adapter
2319  *	@mbox: mbox to use for the FW command
2320  *	@mode: global RSS mode
2321  *	@flags: mode-specific flags
2322  *
2323  *	Sets the global RSS mode.
2324  */
2325 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2326 		       unsigned int flags)
2327 {
2328 	struct fw_rss_glb_config_cmd c;
2329 
2330 	memset(&c, 0, sizeof(c));
2331 	c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2332 			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2333 	c.retval_len16 = htonl(FW_LEN16(c));
2334 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2335 		c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2336 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2337 		c.u.basicvirtual.mode_pkd =
2338 			htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2339 		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2340 	} else
2341 		return -EINVAL;
2342 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2343 }
2344 
2345 /**
2346  *	t4_config_vi_rss - configure per VI RSS settings
2347  *	@adapter: the adapter
2348  *	@mbox: mbox to use for the FW command
2349  *	@viid: the VI id
2350  *	@flags: RSS flags
2351  *	@defq: id of the default RSS queue for the VI
2352  *
2353  *	Configures VI-specific RSS properties.
2354  */
2355 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2356 		     unsigned int flags, unsigned int defq)
2357 {
2358 	struct fw_rss_vi_config_cmd c;
2359 
2360 	memset(&c, 0, sizeof(c));
2361 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2362 			     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2363 			     V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2364 	c.retval_len16 = htonl(FW_LEN16(c));
2365 	c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2366 					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2367 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2368 }
2369 
2370 /* Read an RSS table row */
2371 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2372 {
2373 	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2374 	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2375 				   5, 0, val);
2376 }
2377 
2378 /**
2379  *	t4_read_rss - read the contents of the RSS mapping table
2380  *	@adapter: the adapter
2381  *	@map: %RSS_NENTRIES-entry array that receives the mapping table
2382  *
2383  *	Reads the contents of the RSS hash->queue mapping table.
2384  */
2385 int t4_read_rss(struct adapter *adapter, u16 *map)
2386 {
2387 	u32 val;
2388 	int i, ret;
2389 
2390 	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2391 		ret = rd_rss_row(adapter, i, &val);
2392 		if (ret)
2393 			return ret;
2394 		*map++ = G_LKPTBLQUEUE0(val);
2395 		*map++ = G_LKPTBLQUEUE1(val);
2396 	}
2397 	return 0;
2398 }
2399 
2400 /**
2401  *	t4_read_rss_key - read the global RSS key
2402  *	@adap: the adapter
2403  *	@key: 10-entry array holding the 320-bit RSS key
2404  *
2405  *	Reads the global 320-bit RSS key.
2406  */
2407 void t4_read_rss_key(struct adapter *adap, u32 *key)
2408 {
2409 	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2410 			 A_TP_RSS_SECRET_KEY0);
2411 }
2412 
2413 /**
2414  *	t4_write_rss_key - program one of the RSS keys
2415  *	@adap: the adapter
2416  *	@key: 10-entry array holding the 320-bit RSS key
2417  *	@idx: which RSS key to write
2418  *
2419  *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
2420  *	0..15 the corresponding entry in the RSS key table is written,
2421  *	otherwise the global RSS key is written.
2422  */
2423 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2424 {
2425 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2426 			  A_TP_RSS_SECRET_KEY0);
2427 	if (idx >= 0 && idx < 16)
2428 		t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2429 			     V_KEYWRADDR(idx) | F_KEYWREN);
2430 }
2431 
2432 /**
2433  *	t4_read_rss_pf_config - read PF RSS Configuration Table
2434  *	@adapter: the adapter
2435  *	@index: the entry in the PF RSS table to read
2436  *	@valp: where to store the returned value
2437  *
2438  *	Reads the PF RSS Configuration Table at the specified index and returns
2439  *	the value found there.
2440  */
2441 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2442 {
2443 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2444 			 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2445 }
2446 
2447 /**
2448  *	t4_write_rss_pf_config - write PF RSS Configuration Table
2449  *	@adapter: the adapter
2450  *	@index: the entry in the PF RSS table to write
2451  *	@val: the value to store
2452  *
2453  *	Writes the PF RSS Configuration Table at the specified index with the
2454  *	specified value.
2455  */
2456 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2457 {
2458 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2459 			  &val, 1, A_TP_RSS_PF0_CONFIG + index);
2460 }
2461 
2462 /**
2463  *	t4_read_rss_vf_config - read VF RSS Configuration Table
2464  *	@adapter: the adapter
2465  *	@index: the entry in the VF RSS table to read
2466  *	@vfl: where to store the returned VFL
2467  *	@vfh: where to store the returned VFH
2468  *
2469  *	Reads the VF RSS Configuration Table at the specified index and returns
2470  *	the (VFL, VFH) values found there.
2471  */
2472 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
2473 			   u32 *vfl, u32 *vfh)
2474 {
2475 	u32 vrt;
2476 
2477 	/*
2478 	 * Request that the index'th VF Table values be read into VFL/VFH.
2479 	 */
2480 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2481 	vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
2482 	vrt |= V_VFWRADDR(index) | F_VFRDEN;
2483 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2484 
2485 	/*
2486 	 * Grab the VFL/VFH values ...
2487 	 */
2488 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2489 			 vfl, 1, A_TP_RSS_VFL_CONFIG);
2490 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2491 			 vfh, 1, A_TP_RSS_VFH_CONFIG);
2492 }
2493 
2494 /**
2495  *	t4_write_rss_vf_config - write VF RSS Configuration Table
2497  *	@adapter: the adapter
2498  *	@index: the entry in the VF RSS table to write
2499  *	@vfl: the VFL to store
2500  *	@vfh: the VFH to store
2501  *
2502  *	Writes the VF RSS Configuration Table at the specified index with the
2503  *	specified (VFL, VFH) values.
2504  */
2505 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
2506 			    u32 vfl, u32 vfh)
2507 {
2508 	u32 vrt;
2509 
2510 	/*
2511 	 * Load up VFL/VFH with the values to be written ...
2512 	 */
2513 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2514 			  &vfl, 1, A_TP_RSS_VFL_CONFIG);
2515 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2516 			  &vfh, 1, A_TP_RSS_VFH_CONFIG);
2517 
2518 	/*
2519 	 * Write the VFL/VFH into the VF Table at index'th location.
2520 	 */
2521 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
2522 	vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
2523 	vrt |= V_VFWRADDR(index) | F_VFWREN;
2524 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
2525 }
2526 
2527 /**
2528  *	t4_read_rss_pf_map - read PF RSS Map
2529  *	@adapter: the adapter
2530  *
2531  *	Reads the PF RSS Map register and returns its value.
2532  */
2533 u32 t4_read_rss_pf_map(struct adapter *adapter)
2534 {
2535 	u32 pfmap;
2536 
2537 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2538 			 &pfmap, 1, A_TP_RSS_PF_MAP);
2539 	return pfmap;
2540 }
2541 
2542 /**
2543  *	t4_write_rss_pf_map - write PF RSS Map
2544  *	@adapter: the adapter
2545  *	@pfmap: PF RSS Map value
2546  *
2547  *	Writes the specified value to the PF RSS Map register.
2548  */
2549 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
2550 {
2551 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2552 			  &pfmap, 1, A_TP_RSS_PF_MAP);
2553 }
2554 
2555 /**
2556  *	t4_read_rss_pf_mask - read PF RSS Mask
2557  *	@adapter: the adapter
2558  *
2559  *	Reads the PF RSS Mask register and returns its value.
2560  */
2561 u32 t4_read_rss_pf_mask(struct adapter *adapter)
2562 {
2563 	u32 pfmask;
2564 
2565 	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2566 			 &pfmask, 1, A_TP_RSS_PF_MSK);
2567 	return pfmask;
2568 }
2569 
2570 /**
2571  *	t4_write_rss_pf_mask - write PF RSS Mask
2572  *	@adapter: the adapter
2573  *	@pfmask: PF RSS Mask value
2574  *
2575  *	Writes the specified value to the PF RSS Mask register.
2576  */
2577 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
2578 {
2579 	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2580 			  &pfmask, 1, A_TP_RSS_PF_MSK);
2581 }
2582 
2583 /**
2584  *	t4_set_filter_mode - configure the optional components of filter tuples
2585  *	@adap: the adapter
2586  *	@mode_map: a bitmap selecting which optional filter components to enable
2587  *
2588  *	Sets the filter mode by selecting the optional components to enable
2589  *	in filter tuples.  Returns 0 on success and a negative error if the
2590  *	requested mode needs more bits than are available for optional
2591  *	components.
2592  */
2593 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
2594 {
2595 	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
2596 
2597 	int i, nbits = 0;
2598 
2599 	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
2600 		if (mode_map & (1 << i))
2601 			nbits += width[i];
2602 	if (nbits > FILTER_OPT_LEN)
2603 		return -EINVAL;
2604 	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
2605 			  A_TP_VLAN_PRI_MAP);
2606 	return 0;
2607 }
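
/*
 * Example, assuming FILTER_OPT_LEN is 36 as on T4 and the component
 * order of the width[] table above: enabling VLAN (17 bits), Ethertype
 * (16 bits) and port (3 bits) needs exactly 36 bits and is accepted,
 * while adding even the 1-bit FCoE component pushes the total to 37
 * and the call returns -EINVAL.
 */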
2608 
2609 /**
2610  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
2611  *	@adap: the adapter
2612  *	@v4: holds the TCP/IP counter values
2613  *	@v6: holds the TCP/IPv6 counter values
2614  *
2615  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2616  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2617  */
2618 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2619 			 struct tp_tcp_stats *v6)
2620 {
2621 	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
2622 
2623 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
2624 #define STAT(x)     val[STAT_IDX(x)]
2625 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2626 
2627 	if (v4) {
2628 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2629 				 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
2630 		v4->tcpOutRsts = STAT(OUT_RST);
2631 		v4->tcpInSegs  = STAT64(IN_SEG);
2632 		v4->tcpOutSegs = STAT64(OUT_SEG);
2633 		v4->tcpRetransSegs = STAT64(RXT_SEG);
2634 	}
2635 	if (v6) {
2636 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2637 				 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
2638 		v6->tcpOutRsts = STAT(OUT_RST);
2639 		v6->tcpInSegs  = STAT64(IN_SEG);
2640 		v6->tcpOutSegs = STAT64(OUT_SEG);
2641 		v6->tcpRetransSegs = STAT64(RXT_SEG);
2642 	}
2643 #undef STAT64
2644 #undef STAT
2645 #undef STAT_IDX
2646 }
2647 
2648 /**
2649  *	t4_tp_get_err_stats - read TP's error MIB counters
2650  *	@adap: the adapter
2651  *	@st: holds the counter values
2652  *
2653  *	Returns the values of TP's error counters.
2654  */
2655 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
2656 {
2657 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
2658 			 12, A_TP_MIB_MAC_IN_ERR_0);
2659 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
2660 			 8, A_TP_MIB_TNL_CNG_DROP_0);
2661 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
2662 			 4, A_TP_MIB_TNL_DROP_0);
2663 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
2664 			 4, A_TP_MIB_OFD_VLN_DROP_0);
2665 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
2666 			 4, A_TP_MIB_TCP_V6IN_ERR_0);
2667 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
2668 			 2, A_TP_MIB_OFD_ARP_DROP);
2669 }
2670 
2671 /**
2672  *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
2673  *	@adap: the adapter
2674  *	@st: holds the counter values
2675  *
2676  *	Returns the values of TP's proxy counters.
2677  */
2678 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
2679 {
2680 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
2681 			 4, A_TP_MIB_TNL_LPBK_0);
2682 }
2683 
2684 /**
2685  *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
2686  *	@adap: the adapter
2687  *	@st: holds the counter values
2688  *
2689  *	Returns the values of TP's CPL counters.
2690  */
2691 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
2692 {
2693 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
2694 			 8, A_TP_MIB_CPL_IN_REQ_0);
2695 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tx_err,
2696 			 4, A_TP_MIB_CPL_OUT_ERR_0);
2697 }
2698 
2699 /**
2700  *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
2701  *	@adap: the adapter
2702  *	@st: holds the counter values
2703  *
2704  *	Returns the values of TP's RDMA counters.
2705  */
2706 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
2707 {
2708 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
2709 			 2, A_TP_MIB_RQE_DFR_MOD);
2710 }
2711 
2712 /**
2713  *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
2714  *	@adap: the adapter
2715  *	@idx: the port index
2716  *	@st: holds the counter values
2717  *
2718  *	Returns the values of TP's FCoE counters for the selected port.
2719  */
2720 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
2721 		       struct tp_fcoe_stats *st)
2722 {
2723 	u32 val[2];
2724 
2725 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
2726 			 1, A_TP_MIB_FCOE_DDP_0 + idx);
2727 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
2728 			 1, A_TP_MIB_FCOE_DROP_0 + idx);
2729 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
2730 			 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
2731 	st->octetsDDP = ((u64)val[0] << 32) | val[1];
2732 }
2733 
2734 /**
2735  *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
2736  *	@adap: the adapter
2737  *	@st: holds the counter values
2738  *
2739  *	Returns the values of TP's counters for non-TCP directly-placed packets.
2740  */
2741 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
2742 {
2743 	u32 val[4];
2744 
2745 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
2746 			 A_TP_MIB_USM_PKTS);
2747 	st->frames = val[0];
2748 	st->drops = val[1];
2749 	st->octets = ((u64)val[2] << 32) | val[3];
2750 }
2751 
2752 /**
2753  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
2754  *	@adap: the adapter
2755  *	@mtus: where to store the MTU values
2756  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
2757  *
2758  *	Reads the HW path MTU table.
2759  */
2760 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2761 {
2762 	u32 v;
2763 	int i;
2764 
2765 	for (i = 0; i < NMTUS; ++i) {
2766 		t4_write_reg(adap, A_TP_MTU_TABLE,
2767 			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
2768 		v = t4_read_reg(adap, A_TP_MTU_TABLE);
2769 		mtus[i] = G_MTUVALUE(v);
2770 		if (mtu_log)
2771 			mtu_log[i] = G_MTUWIDTH(v);
2772 	}
2773 }
2774 
2775 /**
2776  *	t4_read_cong_tbl - reads the congestion control table
2777  *	@adap: the adapter
2778  *	@incr: where to store the alpha values
2779  *
2780  *	Reads the additive increments programmed into the HW congestion
2781  *	control table.
2782  */
2783 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
2784 {
2785 	unsigned int mtu, w;
2786 
2787 	for (mtu = 0; mtu < NMTUS; ++mtu)
2788 		for (w = 0; w < NCCTRL_WIN; ++w) {
2789 			t4_write_reg(adap, A_TP_CCTRL_TABLE,
2790 				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
2791 			incr[mtu][w] = (u16)t4_read_reg(adap,
2792 						A_TP_CCTRL_TABLE) & 0x1fff;
2793 		}
2794 }
2795 
2796 /**
2797  *	t4_read_pace_tbl - read the pace table
2798  *	@adap: the adapter
2799  *	@pace_vals: holds the returned values
2800  *
2801  *	Returns the values of TP's pace table in microseconds.
2802  */
2803 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
2804 {
2805 	unsigned int i, v;
2806 
2807 	for (i = 0; i < NTX_SCHED; i++) {
2808 		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
2809 		v = t4_read_reg(adap, A_TP_PACE_TABLE);
2810 		pace_vals[i] = dack_ticks_to_usec(adap, v);
2811 	}
2812 }
2813 
2814 /**
2815  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2816  *	@adap: the adapter
2817  *	@addr: the indirect TP register address
2818  *	@mask: specifies the field within the register to modify
2819  *	@val: new value for the field
2820  *
2821  *	Sets a field of an indirect TP register to the given value.
2822  */
2823 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2824 			    unsigned int mask, unsigned int val)
2825 {
2826 	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
2827 	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
2828 	t4_write_reg(adap, A_TP_PIO_DATA, val);
2829 }
2830 
2831 /**
2832  *	init_cong_ctrl - initialize congestion control parameters
2833  *	@a: the alpha values for congestion control
2834  *	@b: the beta values for congestion control
2835  *
2836  *	Initialize the congestion control parameters.
2837  */
2838 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2839 {
2840 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2841 	a[9] = 2;
2842 	a[10] = 3;
2843 	a[11] = 4;
2844 	a[12] = 5;
2845 	a[13] = 6;
2846 	a[14] = 7;
2847 	a[15] = 8;
2848 	a[16] = 9;
2849 	a[17] = 10;
2850 	a[18] = 14;
2851 	a[19] = 17;
2852 	a[20] = 21;
2853 	a[21] = 25;
2854 	a[22] = 30;
2855 	a[23] = 35;
2856 	a[24] = 45;
2857 	a[25] = 60;
2858 	a[26] = 80;
2859 	a[27] = 100;
2860 	a[28] = 200;
2861 	a[29] = 300;
2862 	a[30] = 400;
2863 	a[31] = 500;
2864 
2865 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2866 	b[9] = b[10] = 1;
2867 	b[11] = b[12] = 2;
2868 	b[13] = b[14] = b[15] = b[16] = 3;
2869 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2870 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2871 	b[28] = b[29] = 6;
2872 	b[30] = b[31] = 7;
2873 }
2874 
2875 /* The minimum additive increment value for the congestion control table */
2876 #define CC_MIN_INCR 2U
2877 
2878 /**
2879  *	t4_load_mtus - write the MTU and congestion control HW tables
2880  *	@adap: the adapter
2881  *	@mtus: the values for the MTU table
2882  *	@alpha: the values for the congestion control alpha parameter
2883  *	@beta: the values for the congestion control beta parameter
2884  *
2885  *	Write the HW MTU table with the supplied MTUs and the high-speed
2886  *	congestion control table with the supplied alpha, beta, and MTUs.
2887  *	We write the two tables together because the additive increments
2888  *	depend on the MTUs.
2889  */
2890 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2891 		  const unsigned short *alpha, const unsigned short *beta)
2892 {
2893 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
2894 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2895 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2896 		28672, 40960, 57344, 81920, 114688, 163840, 229376
2897 	};
2898 
2899 	unsigned int i, w;
2900 
2901 	for (i = 0; i < NMTUS; ++i) {
2902 		unsigned int mtu = mtus[i];
2903 		unsigned int log2 = fls(mtu);
2904 
2905 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
2906 			log2--;
2907 		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
2908 			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
2909 
2910 		for (w = 0; w < NCCTRL_WIN; ++w) {
2911 			unsigned int inc;
2912 
2913 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2914 				  CC_MIN_INCR);
2915 
2916 			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2917 				     (w << 16) | (beta[w] << 13) | inc);
2918 		}
2919 	}
2920 }
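
/*
 * Worked example (illustrative values): for mtus[i] == 1500, fls(1500)
 * is 11 and bit 9 (512) of 1500 is clear, so log2 is rounded down to 10
 * before the entry is written.  For a window with alpha[w] == 2 and
 * avg_pkts[w] == 10, the additive increment stored in the congestion
 * control table is max((1500 - 40) * 2 / 10, CC_MIN_INCR) == 292 bytes.
 */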
2921 
2922 /**
2923  *	t4_set_pace_tbl - set the pace table
2924  *	@adap: the adapter
2925  *	@pace_vals: the pace values in microseconds
2926  *	@start: index of the first entry in the HW pace table to set
2927  *	@n: how many entries to set
2928  *
2929  *	Sets (a subset of the) HW pace table.
2930  */
2931 int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
2932 		     unsigned int start, unsigned int n)
2933 {
2934 	unsigned int vals[NTX_SCHED], i;
2935 	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
2936 
2937 	if (n > NTX_SCHED)
2938 		return -ERANGE;
2939 
2940 	/* convert values from us to dack ticks, rounding to closest value */
2941 	for (i = 0; i < n; i++, pace_vals++) {
2942 		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
2943 		if (vals[i] > 0x7ff)
2944 			return -ERANGE;
2945 		if (*pace_vals && vals[i] == 0)
2946 			return -ERANGE;
2947 	}
2948 	for (i = 0; i < n; i++, start++)
2949 		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
2950 	return 0;
2951 }
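
/*
 * Conversion example, assuming (for illustration) a 100 ns DACK tick,
 * i.e. tick_ns == 100: a requested pace of 25 us becomes
 * (1000 * 25 + 50) / 100 == 250 ticks, well under the 0x7ff limit.
 * Note that a non-zero request which rounds to 0 ticks is rejected
 * with -ERANGE rather than silently disabling pacing.
 */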
2952 
2953 /**
2954  *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
2955  *	@adap: the adapter
2956  *	@sched: the scheduler index
2957  *	@kbps: target rate in Kbps
2958  *
2959  *	Configure a Tx HW scheduler for the target rate.
2960  */
2961 int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
2962 {
2963 	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2964 	unsigned int clk = adap->params.vpd.cclk * 1000;
2965 	unsigned int selected_cpt = 0, selected_bpt = 0;
2966 
2967 	if (kbps > 0) {
2968 		kbps *= 125;     /* -> bytes */
2969 		for (cpt = 1; cpt <= 255; cpt++) {
2970 			tps = clk / cpt;
2971 			bpt = (kbps + tps / 2) / tps;
2972 			if (bpt > 0 && bpt <= 255) {
2973 				v = bpt * tps;
2974 				delta = v >= kbps ? v - kbps : kbps - v;
2975 				if (delta < mindelta) {
2976 					mindelta = delta;
2977 					selected_cpt = cpt;
2978 					selected_bpt = bpt;
2979 				}
2980 			} else if (selected_cpt)
2981 				break;
2982 		}
2983 		if (!selected_cpt)
2984 			return -EINVAL;
2985 	}
2986 	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
2987 		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2988 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
2989 	if (sched & 1)
2990 		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2991 	else
2992 		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2993 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
2994 	return 0;
2995 }
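
/*
 * Worked example, assuming a 250 MHz core clock (cclk == 250000, so
 * clk == 250000000): for a 1 Gb/s target, kbps * 125 == 125000000
 * bytes/s.  At cpt == 2 the scheduler ticks tps == 125000000 times per
 * second and bpt rounds to 1 byte per tick, hitting the target exactly
 * (delta == 0), so the search settles on cpt == 2, bpt == 1.
 */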
2996 
2997 /**
2998  *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
2999  *	@adap: the adapter
3000  *	@sched: the scheduler index
3001  *	@ipg: the interpacket delay in tenths of nanoseconds
3002  *
3003  *	Set the interpacket delay for a HW packet rate scheduler.
3004  */
3005 int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3006 {
3007 	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3008 
3009 	/* convert ipg to nearest number of core clocks */
3010 	ipg *= core_ticks_per_usec(adap);
3011 	ipg = (ipg + 5000) / 10000;
3012 	if (ipg > M_TXTIMERSEPQ0)
3013 		return -EINVAL;
3014 
3015 	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3016 	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3017 	if (sched & 1)
3018 		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3019 	else
3020 		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3021 	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3022 	t4_read_reg(adap, A_TP_TM_PIO_DATA);
3023 	return 0;
3024 }
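
/*
 * Worked example, again assuming a 250 MHz core clock (250 core ticks
 * per usec): a requested ipg of 100 (10.0 ns) scales to 100 * 250 ==
 * 25000, and (25000 + 5000) / 10000 rounds to 3 core clocks, i.e. an
 * actual gap of 12 ns at this clock rate.
 */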
3025 
3026 /**
3027  *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3028  *	@adap: the adapter
3029  *	@sched: the scheduler index
3030  *	@kbps: where to store the rate in Kbps, or %NULL
3031  *	@ipg: where to store the interpacket delay in tenths of ns, or %NULL
3032  *
3033  *	Return the current configuration of a HW Tx scheduler.
3034  */
3035 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
3036 		     unsigned int *ipg)
3037 {
3038 	unsigned int v, addr, bpt, cpt;
3039 
3040 	if (kbps) {
3041 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3042 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3043 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3044 		if (sched & 1)
3045 			v >>= 16;
3046 		bpt = (v >> 8) & 0xff;
3047 		cpt = v & 0xff;
3048 		if (!cpt)
3049 			*kbps = 0;        /* scheduler disabled */
3050 		else {
3051 			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3052 			*kbps = (v * bpt) / 125;
3053 		}
3054 	}
3055 	if (ipg) {
3056 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3057 		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3058 		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3059 		if (sched & 1)
3060 			v >>= 16;
3061 		v &= 0xffff;
3062 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3063 	}
3064 }
3065 
3066 /*
3067  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3068  * clocks.  The formula is
3069  *
3070  * bytes/s = bytes256 * 256 * ClkFreq / 4096
3071  *
3072  * which is equivalent to
3073  *
3074  * bytes/s = 62.5 * bytes256 * ClkFreq_kHz (cclk is stored in kHz)
3075  */
3076 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3077 {
3078 	u64 v = bytes256 * adap->params.vpd.cclk;
3079 
3080 	return v * 62 + v / 2;
3081 }
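
/*
 * Numeric check, assuming a 200 MHz core clock (cclk == 200000): for
 * bytes256 == 16, v == 3200000 and v * 62 + v / 2 == 200000000 bytes/s,
 * which matches 16 * 256 * 200000000 / 4096 from the formula above.
 */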
3082 
3083 /**
3084  *	t4_get_chan_txrate - get the current per channel Tx rates
3085  *	@adap: the adapter
3086  *	@nic_rate: rates for NIC traffic
3087  *	@ofld_rate: rates for offloaded traffic
3088  *
3089  *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
3090  *	for each channel.
3091  */
3092 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
3093 {
3094 	u32 v;
3095 
3096 	v = t4_read_reg(adap, A_TP_TX_TRATE);
3097 	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3098 	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3099 	nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3100 	nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3101 
3102 	v = t4_read_reg(adap, A_TP_TX_ORATE);
3103 	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3104 	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3105 	ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3106 	ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3107 }
3108 
3109 /**
3110  *	t4_set_trace_filter - configure one of the tracing filters
3111  *	@adap: the adapter
3112  *	@tp: the desired trace filter parameters
3113  *	@idx: which filter to configure
3114  *	@enable: whether to enable or disable the filter
3115  *
3116  *	Configures one of the tracing filters available in HW.  If @enable is
3117  *	%0 @tp is not examined and may be %NULL.
3118  */
3119 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
3120 			int enable)
3121 {
3122 	int i, ofst = idx * 4;
3123 	u32 data_reg, mask_reg, cfg;
3124 	u32 multitrc = F_TRCMULTIFILTER;
3125 
3126 	if (!enable) {
3127 		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3128 		goto out;
3129 	}
3130 
3131 	if (tp->port > 11 || tp->invert > 1 || tp->skip_len > M_TFLENGTH ||
3132 	    tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE ||
3133 	    tp->snap_len > 9600 || (idx && tp->snap_len > 256))
3134 		return -EINVAL;
3135 
3136 	if (tp->snap_len > 256) {            /* must be tracer 0 */
3137 		if ((t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 4) |
3138 		     t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 8) |
3139 		     t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 12)) &
3140 		    F_TFEN)
3141 			return -EINVAL;  /* other tracers are enabled */
3142 		multitrc = 0;
3143 	} else if (idx) {
3144 		i = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B);
3145 		if (G_TFCAPTUREMAX(i) > 256 &&
3146 		    (t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A) & F_TFEN))
3147 			return -EINVAL;
3148 	}
3149 
3150 	/* stop the tracer we'll be changing */
3151 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3152 
3153 	/* disable tracing globally if running in the wrong single/multi mode */
3154 	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3155 	if ((cfg & F_TRCEN) && multitrc != (cfg & F_TRCMULTIFILTER)) {
3156 		t4_write_reg(adap, A_MPS_TRC_CFG, cfg ^ F_TRCEN);
3157 		t4_read_reg(adap, A_MPS_TRC_CFG);                  /* flush */
3158 		msleep(1);
3159 		if (!(t4_read_reg(adap, A_MPS_TRC_CFG) & F_TRCFIFOEMPTY))
3160 			return -ETIMEDOUT;
3161 	}
3162 	/*
3163 	 * At this point either the tracing is enabled and in the right mode or
3164 	 * disabled.
3165 	 */
3166 
3167 	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3168 	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3169 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
3170 
3171 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3172 		t4_write_reg(adap, data_reg, tp->data[i]);
3173 		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
3174 	}
3175 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
3176 		     V_TFCAPTUREMAX(tp->snap_len) |
3177 		     V_TFMINPKTSIZE(tp->min_len));
3178 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
3179 		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
3180 		     V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert));
3181 
3182 	cfg &= ~F_TRCMULTIFILTER;
3183 	t4_write_reg(adap, A_MPS_TRC_CFG, cfg | F_TRCEN | multitrc);
3184 out:	t4_read_reg(adap, A_MPS_TRC_CFG);  /* flush */
3185 	return 0;
3186 }
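
/*
 * Usage sketch (illustrative only; an all-zero mask is assumed to make
 * every byte a don't-care, since ~mask is written to the DONT_CARE
 * registers above): to capture the first 128 bytes of each frame seen
 * on port 0 with tracer 1, a caller might do
 *
 *	struct trace_params tp;
 *	int err;
 *
 *	memset(&tp, 0, sizeof(tp));	// match any packet
 *	tp.snap_len = 128;		// <= 256 since idx != 0
 *	tp.port = 0;
 *	err = t4_set_trace_filter(adap, &tp, 1, 1);
 */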
3187 
3188 /**
3189  *	t4_get_trace_filter - query one of the tracing filters
3190  *	@adap: the adapter
3191  *	@tp: the current trace filter parameters
3192  *	@idx: which trace filter to query
3193  *	@enabled: non-zero if the filter is enabled
3194  *
3195  *	Returns the current settings of one of the HW tracing filters.
3196  */
3197 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3198 			 int *enabled)
3199 {
3200 	u32 ctla, ctlb;
3201 	int i, ofst = idx * 4;
3202 	u32 data_reg, mask_reg;
3203 
3204 	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3205 	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3206 
3207 	*enabled = !!(ctla & F_TFEN);
3208 	tp->snap_len = G_TFCAPTUREMAX(ctlb);
3209 	tp->min_len = G_TFMINPKTSIZE(ctlb);
3210 	tp->skip_ofst = G_TFOFFSET(ctla);
3211 	tp->skip_len = G_TFLENGTH(ctla);
3212 	tp->invert = !!(ctla & F_TFINVERTMATCH);
3213 	tp->port = G_TFPORT(ctla);
3214 
3215 	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3216 	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3217 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3218 
3219 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3220 		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3221 		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3222 	}
3223 }
3224 
3225 /**
3226  *	t4_pmtx_get_stats - returns the HW stats from PMTX
3227  *	@adap: the adapter
3228  *	@cnt: where to store the count statistics
3229  *	@cycles: where to store the cycle statistics
3230  *
3231  *	Returns performance statistics from PMTX.
3232  */
3233 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3234 {
3235 	int i;
3236 
3237 	for (i = 0; i < PM_NSTATS; i++) {
3238 		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3239 		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3240 		cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3241 	}
3242 }
3243 
3244 /**
3245  *	t4_pmrx_get_stats - returns the HW stats from PMRX
3246  *	@adap: the adapter
3247  *	@cnt: where to store the count statistics
3248  *	@cycles: where to store the cycle statistics
3249  *
3250  *	Returns performance statistics from PMRX.
3251  */
3252 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3253 {
3254 	int i;
3255 
3256 	for (i = 0; i < PM_NSTATS; i++) {
3257 		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3258 		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3259 		cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3260 	}
3261 }
3262 
3263 /**
3264  *	get_mps_bg_map - return the buffer groups associated with a port
3265  *	@adap: the adapter
3266  *	@idx: the port index
3267  *
3268  *	Returns a bitmap indicating which MPS buffer groups are associated
3269  *	with the given port.  Bit i is set if buffer group i is used by the
3270  *	port.
3271  */
3272 static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
3273 {
3274 	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3275 
3276 	if (n == 0)
3277 		return idx == 0 ? 0xf : 0;
3278 	if (n == 1)
3279 		return idx < 2 ? (3 << (2 * idx)) : 0;
3280 	return 1 << idx;
3281 }
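
/*
 * Concretely: with four ports each port i owns buffer group i alone
 * (1 << i); with two ports, port 0 owns groups 0-1 (0x3) and port 1
 * owns groups 2-3 (0xc); a single port owns all four groups (0xf).
 */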
3282 
3283 /**
3284  *	t4_get_port_stats - collect port statistics
3285  *	@adap: the adapter
3286  *	@idx: the port index
3287  *	@p: the stats structure to fill
3288  *
3289  *	Collect statistics related to the given port from HW.
3290  */
3291 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3292 {
3293 	u32 bgmap = get_mps_bg_map(adap, idx);
3294 
3295 #define GET_STAT(name) \
3296 	t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))
3297 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3298 
3299 	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
3300 	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
3301 	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
3302 	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
3303 	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
3304 	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
3305 	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
3306 	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
3307 	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
3308 	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
3309 	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
3310 	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3311 	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
3312 	p->tx_drop             = GET_STAT(TX_PORT_DROP);
3313 	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
3314 	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
3315 	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
3316 	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
3317 	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
3318 	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
3319 	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
3320 	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
3321 	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
3322 
3323 	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
3324 	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
3325 	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
3326 	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
3327 	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
3328 	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
3329 	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3330 	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
3331 	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
3332 	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
3333 	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
3334 	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
3335 	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
3336 	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
3337 	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
3338 	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
3339 	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3340 	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
3341 	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
3342 	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
3343 	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
3344 	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
3345 	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
3346 	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
3347 	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
3348 	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
3349 	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
3350 
3351 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3352 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3353 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3354 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3355 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3356 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3357 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3358 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3359 
3360 #undef GET_STAT
3361 #undef GET_STAT_COM
3362 }
3363 
3364 /**
3365  *	t4_clr_port_stats - clear port statistics
3366  *	@adap: the adapter
3367  *	@idx: the port index
3368  *
3369  *	Clear HW statistics for the given port.
3370  */
3371 void t4_clr_port_stats(struct adapter *adap, int idx)
3372 {
3373 	unsigned int i;
3374 	u32 bgmap = get_mps_bg_map(adap, idx);
3375 
3376 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3377 	     i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3378 		t4_write_reg(adap, PORT_REG(idx, i), 0);
3379 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3380 	     i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3381 		t4_write_reg(adap, PORT_REG(idx, i), 0);
3382 	for (i = 0; i < 4; i++)
3383 		if (bgmap & (1 << i)) {
3384 			t4_write_reg(adap,
3385 				A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
3386 			t4_write_reg(adap,
3387 				A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
3388 		}
3389 }
3390 
3391 /**
3392  *	t4_get_lb_stats - collect loopback port statistics
3393  *	@adap: the adapter
3394  *	@idx: the loopback port index
3395  *	@p: the stats structure to fill
3396  *
3397  *	Return HW statistics for the given loopback port.
3398  */
3399 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
3400 {
3401 	u32 bgmap = get_mps_bg_map(adap, idx);
3402 
3403 #define GET_STAT(name) \
3404 	t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))
3405 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3406 
3407 	p->octets           = GET_STAT(BYTES);
3408 	p->frames           = GET_STAT(FRAMES);
3409 	p->bcast_frames     = GET_STAT(BCAST);
3410 	p->mcast_frames     = GET_STAT(MCAST);
3411 	p->ucast_frames     = GET_STAT(UCAST);
3412 	p->error_frames     = GET_STAT(ERROR);
3413 
3414 	p->frames_64        = GET_STAT(64B);
3415 	p->frames_65_127    = GET_STAT(65B_127B);
3416 	p->frames_128_255   = GET_STAT(128B_255B);
3417 	p->frames_256_511   = GET_STAT(256B_511B);
3418 	p->frames_512_1023  = GET_STAT(512B_1023B);
3419 	p->frames_1024_1518 = GET_STAT(1024B_1518B);
3420 	p->frames_1519_max  = GET_STAT(1519B_MAX);
3421 	p->drop             = t4_read_reg(adap, PORT_REG(idx,
3422 					  A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
3423 
3424 	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
3425 	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
3426 	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
3427 	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
3428 	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
3429 	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
3430 	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
3431 	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
3432 
3433 #undef GET_STAT
3434 #undef GET_STAT_COM
3435 }
3436 
3437 /**
3438  *	t4_wol_magic_enable - enable/disable magic packet WoL
3439  *	@adap: the adapter
3440  *	@port: the physical port index
3441  *	@addr: MAC address expected in magic packets, %NULL to disable
3442  *
3443  *	Enables/disables magic packet wake-on-LAN for the selected port.
3444  */
3445 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
3446 			 const u8 *addr)
3447 {
3448 	if (addr) {
3449 		t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO),
3450 			     (addr[2] << 24) | (addr[3] << 16) |
3451 			     (addr[4] << 8) | addr[5]);
3452 		t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI),
3453 			     (addr[0] << 8) | addr[1]);
3454 	}
3455 	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), F_MAGICEN,
3456 			 V_MAGICEN(addr != NULL));
3457 }
3458 
3459 /**
3460  *	t4_wol_pat_enable - enable/disable pattern-based WoL
3461  *	@adap: the adapter
3462  *	@port: the physical port index
3463  *	@map: bitmap of which HW pattern filters to set
3464  *	@mask0: byte mask for bytes 0-63 of a packet
3465  *	@mask1: byte mask for bytes 64-127 of a packet
3466  *	@crc: Ethernet CRC for selected bytes
3467  *	@enable: enable/disable switch
3468  *
3469  *	Sets the pattern filters indicated in @map to mask out the bytes
3470  *	specified in @mask0/@mask1 in received packets and compare the CRC of
3471  *	the resulting packet against @crc.  If @enable is %true pattern-based
3472  *	WoL is enabled, otherwise disabled.
3473  */
3474 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
3475 		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
3476 {
3477 	int i;
3478 
3479 	if (!enable) {
3480 		t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2),
3481 				 F_PATEN, 0);
3482 		return 0;
3483 	}
3484 	if (map > 0xff)
3485 		return -EINVAL;
3486 
3487 #define EPIO_REG(name) PORT_REG(port, A_XGMAC_PORT_EPIO_##name)
3488 
3489 	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
3490 	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
3491 	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
3492 
3493 	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
3494 		if (!(map & 1))
3495 			continue;
3496 
3497 		/* write byte masks */
3498 		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
3499 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
3500 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
3501 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
3502 			return -ETIMEDOUT;
3503 
3504 		/* write CRC */
3505 		t4_write_reg(adap, EPIO_REG(DATA0), crc);
3506 		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
3507 		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
3508 		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
3509 			return -ETIMEDOUT;
3510 	}
3511 #undef EPIO_REG
3512 
3513 	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), 0, F_PATEN);
3514 	return 0;
3515 }
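/*
 * Illustrative call, a sketch only ("sc" and "crc" are hypothetical caller
 * state, and the exact byte-selection semantics of @mask0/@mask1 are as
 * described in the comment above): program pattern filter 0 so that only
 * the first six bytes of a packet participate, then enable pattern WoL on
 * port 0.
 *
 *	u64 mask0 = 0x3f;	// bytes 0-5 of the packet are significant
 *	int ret = t4_wol_pat_enable(sc, 0, 1, mask0, 0, crc, true);
 */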
3516 
3517 /**
3518  *	t4_mk_filtdelwr - create a delete filter WR
3519  *	@ftid: the filter ID
3520  *	@wr: the filter work request to populate
3521  *	@qid: ingress queue to receive the delete notification
3522  *
3523  *	Creates a filter work request to delete the supplied filter.  If @qid is
3524  *	negative the delete notification is suppressed.
3525  */
3526 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
3527 {
3528 	memset(wr, 0, sizeof(*wr));
3529 	wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
3530 	wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
3531 	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
3532 			      V_FW_FILTER_WR_NOREPLY(qid < 0));
3533 	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
3534 	if (qid >= 0)
3535 		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
3536 }
3537 
3538 #define INIT_CMD(var, cmd, rd_wr) do { \
3539 	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
3540 				  F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
3541 	(var).retval_len16 = htonl(FW_LEN16(var)); \
3542 } while (0)
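/*
 * For reference, INIT_CMD(c, BYE, WRITE) token-pastes FW_##cmd##_CMD and
 * F_FW_CMD_##rd_wr and expands to roughly:
 *
 *	c.op_to_write = htonl(V_FW_CMD_OP(FW_BYE_CMD) |
 *			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 */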
3543 
3544 /**
3545  *	t4_mdio_rd - read a PHY register through MDIO
3546  *	@adap: the adapter
3547  *	@mbox: mailbox to use for the FW command
3548  *	@phy_addr: the PHY address
3549  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
3550  *	@reg: the register to read
3551  *	@valp: where to store the value
3552  *
3553  *	Issues a FW command through the given mailbox to read a PHY register.
3554  */
3555 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3556 	       unsigned int mmd, unsigned int reg, unsigned int *valp)
3557 {
3558 	int ret;
3559 	struct fw_ldst_cmd c;
3560 
3561 	memset(&c, 0, sizeof(c));
3562 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3563 		F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3564 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3565 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3566 				   V_FW_LDST_CMD_MMD(mmd));
3567 	c.u.mdio.raddr = htons(reg);
3568 
3569 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3570 	if (ret == 0)
3571 		*valp = ntohs(c.u.mdio.rval);
3572 	return ret;
3573 }
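/*
 * Illustrative usage, a sketch only ("sc" is hypothetical caller state):
 * read the clause-45 PMA/PMD control register (MMD 1, register 0) of the
 * PHY at address 0.
 *
 *	unsigned int v;
 *	int ret = t4_mdio_rd(sc, sc->mbox, 0, 1, 0, &v);
 */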
3574 
3575 /**
3576  *	t4_mdio_wr - write a PHY register through MDIO
3577  *	@adap: the adapter
3578  *	@mbox: mailbox to use for the FW command
3579  *	@phy_addr: the PHY address
3580  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
3581  *	@reg: the register to write
3582  *	@val: value to write
3583  *
3584  *	Issues a FW command through the given mailbox to write a PHY register.
3585  */
3586 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3587 	       unsigned int mmd, unsigned int reg, unsigned int val)
3588 {
3589 	struct fw_ldst_cmd c;
3590 
3591 	memset(&c, 0, sizeof(c));
3592 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3593 		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3594 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3595 	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3596 				   V_FW_LDST_CMD_MMD(mmd));
3597 	c.u.mdio.raddr = htons(reg);
3598 	c.u.mdio.rval = htons(val);
3599 
3600 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3601 }
3602 
3603 /**
3604  *	t4_sge_ctxt_rd - read an SGE context through FW
3605  *	@adap: the adapter
3606  *	@mbox: mailbox to use for the FW command
3607  *	@cid: the context id
3608  *	@ctype: the context type
3609  *	@data: where to store the context data
3610  *
3611  *	Issues a FW command through the given mailbox to read an SGE context.
3612  */
3613 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
3614 		   enum ctxt_type ctype, u32 *data)
3615 {
3616 	int ret;
3617 	struct fw_ldst_cmd c;
3618 
3619 	if (ctype == CTXT_EGRESS)
3620 		ret = FW_LDST_ADDRSPC_SGE_EGRC;
3621 	else if (ctype == CTXT_INGRESS)
3622 		ret = FW_LDST_ADDRSPC_SGE_INGC;
3623 	else if (ctype == CTXT_FLM)
3624 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
3625 	else
3626 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
3627 
3628 	memset(&c, 0, sizeof(c));
3629 	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3630 				  F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
3631 	c.cycles_to_len16 = htonl(FW_LEN16(c));
3632 	c.u.idctxt.physid = htonl(cid);
3633 
3634 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3635 	if (ret == 0) {
3636 		data[0] = ntohl(c.u.idctxt.ctxt_data0);
3637 		data[1] = ntohl(c.u.idctxt.ctxt_data1);
3638 		data[2] = ntohl(c.u.idctxt.ctxt_data2);
3639 		data[3] = ntohl(c.u.idctxt.ctxt_data3);
3640 		data[4] = ntohl(c.u.idctxt.ctxt_data4);
3641 		data[5] = ntohl(c.u.idctxt.ctxt_data5);
3642 	}
3643 	return ret;
3644 }
3645 
3646 /**
3647  *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
3648  *	@adap: the adapter
3649  *	@cid: the context id
3650  *	@ctype: the context type
3651  *	@data: where to store the context data
3652  *
3653  *	Reads an SGE context directly, bypassing FW.  This is only for
3654  *	debugging when FW is unavailable.
3655  */
3656 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
3657 		      u32 *data)
3658 {
3659 	int i, ret;
3660 
3661 	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
3662 	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
3663 	if (!ret)
3664 		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
3665 			*data++ = t4_read_reg(adap, i);
3666 	return ret;
3667 }
3668 
3669 /**
3670  *	t4_fw_hello - establish communication with FW
3671  *	@adap: the adapter
3672  *	@mbox: mailbox to use for the FW command
3673  *	@evt_mbox: mailbox to receive async FW events
3674  *	@master: specifies the caller's willingness to be the device master
3675  *	@state: returns the current device state
3676  *
3677  *	Issues a command to establish communication with FW.
3678  */
3679 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
3680 		enum dev_master master, enum dev_state *state)
3681 {
3682 	int ret;
3683 	struct fw_hello_cmd c;
3684 
3685 	memset(&c, 0, sizeof(c));
3686 	INIT_CMD(c, HELLO, WRITE);
3687 	c.err_to_mbasyncnot = htonl(
3688 		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
3689 		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
3690 		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
3691 			M_FW_HELLO_CMD_MBMASTER) |
3692 		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
3693 
3694 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3695 	if (ret == 0 && state) {
3696 		u32 v = ntohl(c.err_to_mbasyncnot);
3697 		if (v & F_FW_HELLO_CMD_INIT)
3698 			*state = DEV_STATE_INIT;
3699 		else if (v & F_FW_HELLO_CMD_ERR)
3700 			*state = DEV_STATE_ERR;
3701 		else
3702 			*state = DEV_STATE_UNINIT;
3703 		return G_FW_HELLO_CMD_MBMASTER(v);
3704 	}
3705 	return ret;
3706 }
3707 
3708 /**
3709  *	t4_fw_bye - end communication with FW
3710  *	@adap: the adapter
3711  *	@mbox: mailbox to use for the FW command
3712  *
3713  *	Issues a command to terminate communication with FW.
3714  */
3715 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
3716 {
3717 	struct fw_bye_cmd c;
3718 
3719 	memset(&c, 0, sizeof(c));
3720 	INIT_CMD(c, BYE, WRITE);
3721 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3722 }
3723 
3724 /**
3725  *	t4_early_init - ask FW to initialize the device
3726  *	@adap: the adapter
3727  *	@mbox: mailbox to use for the FW command
3728  *
3729  *	Issues a command to FW to partially initialize the device.  This
3730  *	performs initialization that generally doesn't depend on user input.
3731  */
3732 int t4_early_init(struct adapter *adap, unsigned int mbox)
3733 {
3734 	struct fw_initialize_cmd c;
3735 
3736 	memset(&c, 0, sizeof(c));
3737 	INIT_CMD(c, INITIALIZE, WRITE);
3738 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3739 }
3740 
3741 /**
3742  *	t4_fw_reset - issue a reset to FW
3743  *	@adap: the adapter
3744  *	@mbox: mailbox to use for the FW command
3745  *	@reset: specifies the type of reset to perform
3746  *
3747  *	Issues a reset command of the specified type to FW.
3748  */
3749 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
3750 {
3751 	struct fw_reset_cmd c;
3752 
3753 	memset(&c, 0, sizeof(c));
3754 	INIT_CMD(c, RESET, WRITE);
3755 	c.val = htonl(reset);
3756 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3757 }
3758 
3759 /**
3760  *	t4_query_params - query FW or device parameters
3761  *	@adap: the adapter
3762  *	@mbox: mailbox to use for the FW command
3763  *	@pf: the PF
3764  *	@vf: the VF
3765  *	@nparams: the number of parameters
3766  *	@params: the parameter names
3767  *	@val: the parameter values
3768  *
3769  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
3770  *	queried at once.
3771  */
3772 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3773 		    unsigned int vf, unsigned int nparams, const u32 *params,
3774 		    u32 *val)
3775 {
3776 	int i, ret;
3777 	struct fw_params_cmd c;
3778 	__be32 *p = &c.param[0].mnem;
3779 
3780 	if (nparams > 7)
3781 		return -EINVAL;
3782 
3783 	memset(&c, 0, sizeof(c));
3784 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
3785 			    F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
3786 			    V_FW_PARAMS_CMD_VFN(vf));
3787 	c.retval_len16 = htonl(FW_LEN16(c));
3788 
3789 	for (i = 0; i < nparams; i++, p += 2)
3790 		*p = htonl(*params++);
3791 
3792 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3793 	if (ret == 0)
3794 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3795 			*val++ = ntohl(*p);
3796 	return ret;
3797 }
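/*
 * Illustrative usage, a sketch only (the parameter mnemonics are
 * hypothetical placeholders; real ones come from t4fw_interface.h): query
 * two FW parameters in a single mailbox command.
 *
 *	u32 params[2] = { param_mnem0, param_mnem1 };
 *	u32 vals[2];
 *	int ret = t4_query_params(sc, sc->mbox, pf, 0, 2, params, vals);
 */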
3798 
3799 /**
3800  *	t4_set_params - sets FW or device parameters
3801  *	@adap: the adapter
3802  *	@mbox: mailbox to use for the FW command
3803  *	@pf: the PF
3804  *	@vf: the VF
3805  *	@nparams: the number of parameters
3806  *	@params: the parameter names
3807  *	@val: the parameter values
3808  *
3809  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
3810  *	specified at once.
3811  */
3812 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3813 		  unsigned int vf, unsigned int nparams, const u32 *params,
3814 		  const u32 *val)
3815 {
3816 	struct fw_params_cmd c;
3817 	__be32 *p = &c.param[0].mnem;
3818 
3819 	if (nparams > 7)
3820 		return -EINVAL;
3821 
3822 	memset(&c, 0, sizeof(c));
3823 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
3824 			    F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
3825 			    V_FW_PARAMS_CMD_VFN(vf));
3826 	c.retval_len16 = htonl(FW_LEN16(c));
3827 
3828 	while (nparams--) {
3829 		*p++ = htonl(*params++);
3830 		*p++ = htonl(*val++);
3831 	}
3832 
3833 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3834 }
3835 
3836 /**
3837  *	t4_cfg_pfvf - configure PF/VF resource limits
3838  *	@adap: the adapter
3839  *	@mbox: mailbox to use for the FW command
3840  *	@pf: the PF being configured
3841  *	@vf: the VF being configured
3842  *	@txq: the max number of egress queues
3843  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
3844  *	@rxqi: the max number of interrupt-capable ingress queues
3845  *	@rxq: the max number of interruptless ingress queues
3846  *	@tc: the PCI traffic class
3847  *	@vi: the max number of virtual interfaces
3848  *	@cmask: the channel access rights mask for the PF/VF
3849  *	@pmask: the port access rights mask for the PF/VF
3850  *	@nexact: the maximum number of exact MPS filters
3851  *	@rcaps: read capabilities
3852  *	@wxcaps: write/execute capabilities
3853  *
3854  *	Configures resource limits and capabilities for a physical or virtual
3855  *	function.
3856  */
3857 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
3858 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
3859 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
3860 		unsigned int vi, unsigned int cmask, unsigned int pmask,
3861 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
3862 {
3863 	struct fw_pfvf_cmd c;
3864 
3865 	memset(&c, 0, sizeof(c));
3866 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
3867 			    F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
3868 			    V_FW_PFVF_CMD_VFN(vf));
3869 	c.retval_len16 = htonl(FW_LEN16(c));
3870 	c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
3871 			       V_FW_PFVF_CMD_NIQ(rxq));
3872 	c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
3873 			      V_FW_PFVF_CMD_PMASK(pmask) |
3874 			      V_FW_PFVF_CMD_NEQ(txq));
3875 	c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
3876 				V_FW_PFVF_CMD_NEXACTF(nexact));
3877 	c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
3878 				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
3879 				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
3880 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3881 }
3882 
3883 /**
3884  *	t4_alloc_vi - allocate a virtual interface
3885  *	@adap: the adapter
3886  *	@mbox: mailbox to use for the FW command
3887  *	@port: physical port associated with the VI
3888  *	@pf: the PF owning the VI
3889  *	@vf: the VF owning the VI
3890  *	@nmac: number of MAC addresses needed (1 to 5)
3891  *	@mac: the MAC addresses of the VI
3892  *	@rss_size: size of RSS table slice associated with this VI
3893  *
3894  *	Allocates a virtual interface for the given physical port.  If @mac is
3895  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
3896  *	@mac should be large enough to hold @nmac Ethernet addresses, they are
3897  *	@mac should be large enough to hold @nmac Ethernet addresses; they are
3898  *	stored consecutively, so the space needed is @nmac * 6 bytes.
3899  */
3900 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
3901 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
3902 		unsigned int *rss_size)
3903 {
3904 	int ret;
3905 	struct fw_vi_cmd c;
3906 
3907 	memset(&c, 0, sizeof(c));
3908 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
3909 			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
3910 			    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
3911 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
3912 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
3913 	c.nmac = nmac - 1;
3914 
3915 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3916 	if (ret)
3917 		return ret;
3918 
3919 	if (mac) {
3920 		memcpy(mac, c.mac, sizeof(c.mac));
3921 		switch (nmac) {
3922 		case 5:
3923 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));	/* fall through */
3924 		case 4:
3925 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));	/* fall through */
3926 		case 3:
3927 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));	/* fall through */
3928 		case 2:
3929 			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
3930 		}
3931 	}
3932 	if (rss_size)
3933 		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.rsssize_pkd));
3934 	return G_FW_VI_CMD_VIID(ntohs(c.type_to_viid));
3935 }
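/*
 * Illustrative usage, a sketch only ("sc" is hypothetical caller state):
 * allocate a VI with one MAC address on physical port 0 and record the size
 * of its RSS table slice.
 *
 *	u8 mac[6];
 *	unsigned int rss_size;
 *	int viid = t4_alloc_vi(sc, sc->mbox, 0, pf, 0, 1, mac, &rss_size);
 *	if (viid < 0)
 *		return viid;	// negative error number
 */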
3936 
3937 /**
3938  *	t4_free_vi - free a virtual interface
3939  *	@adap: the adapter
3940  *	@mbox: mailbox to use for the FW command
3941  *	@pf: the PF owning the VI
3942  *	@vf: the VF owning the VI
3943  *	@viid: virtual interface identifier
3944  *
3945  *	Free a previously allocated virtual interface.
3946  */
3947 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
3948 	       unsigned int vf, unsigned int viid)
3949 {
3950 	struct fw_vi_cmd c;
3951 
3952 	memset(&c, 0, sizeof(c));
3953 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
3954 			    F_FW_CMD_REQUEST |
3955 			    F_FW_CMD_EXEC |
3956 			    V_FW_VI_CMD_PFN(pf) |
3957 			    V_FW_VI_CMD_VFN(vf));
3958 	c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
3959 	c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
3960 
3961 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3962 }
3963 
3964 /**
3965  *	t4_set_rxmode - set Rx properties of a virtual interface
3966  *	@adap: the adapter
3967  *	@mbox: mailbox to use for the FW command
3968  *	@viid: the VI id
3969  *	@mtu: the new MTU or -1
3970  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
3971  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
3972  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
3973  *	@vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change
3974  *	@sleep_ok: if true we may sleep while awaiting command completion
3975  *
3976  *	Sets Rx properties of a virtual interface.
3977  */
3978 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
3979 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
3980 		  bool sleep_ok)
3981 {
3982 	struct fw_vi_rxmode_cmd c;
3983 
3984 	/* convert to FW values */
3985 	if (mtu < 0)
3986 		mtu = M_FW_VI_RXMODE_CMD_MTU;
3987 	if (promisc < 0)
3988 		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
3989 	if (all_multi < 0)
3990 		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
3991 	if (bcast < 0)
3992 		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
3993 	if (vlanex < 0)
3994 		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
3995 
3996 	memset(&c, 0, sizeof(c));
3997 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
3998 			     F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
3999 	c.retval_len16 = htonl(FW_LEN16(c));
4000 	c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4001 				  V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4002 				  V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4003 				  V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4004 				  V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
4005 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4006 }
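/*
 * Illustrative usage, a sketch only: enable promiscuous mode on a VI while
 * leaving MTU, all-multi, broadcast Rx, and VLAN extraction unchanged
 * (-1 means "no change" for each knob).
 *
 *	int ret = t4_set_rxmode(sc, sc->mbox, viid, -1, 1, -1, -1, -1, true);
 */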
4007 
4008 /**
4009  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
4010  *	@adap: the adapter
4011  *	@mbox: mailbox to use for the FW command
4012  *	@viid: the VI id
4013  *	@free: if true any existing filters for this VI id are first removed
4014  *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
4015  *	@addr: the MAC address(es)
4016  *	@idx: where to store the index of each allocated filter
4017  *	@hash: pointer to hash address filter bitmap
4018  *	@sleep_ok: call is allowed to sleep
4019  *
4020  *	Allocates an exact-match filter for each of the supplied addresses and
4021  *	sets it to the corresponding address.  If @idx is not %NULL it should
4022  *	have at least @naddr entries, each of which will be set to the index of
4023  *	the filter allocated for the corresponding MAC address.  If a filter
4024  *	could not be allocated for an address its index is set to 0xffff.
4025  *	could not be allocated for an address, its index is set to 0xffff.
4026  *	If @hash is not %NULL, addresses that fail to allocate an exact filter
4027  *	are hashed, and the hash-filter bitmap pointed at by @hash is updated.
4028  *	Returns a negative error number or the number of filters allocated.
4029  */
4030 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
4031 		      unsigned int viid, bool free, unsigned int naddr,
4032 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
4033 {
4034 	int offset, ret = 0;
4035 	struct fw_vi_mac_cmd c;
4036 	unsigned int nfilters = 0;
4037 	unsigned int rem = naddr;
4038 
4039 	if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
4040 		return -EINVAL;
4041 
4042 	for (offset = 0; offset < naddr; /**/) {
4043 		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
4044 					 ? rem
4045 					 : ARRAY_SIZE(c.u.exact));
4046 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
4047 						     u.exact[fw_naddr]), 16);
4048 		struct fw_vi_mac_exact *p;
4049 		int i;
4050 
4051 		memset(&c, 0, sizeof(c));
4052 		c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4053 				     F_FW_CMD_REQUEST |
4054 				     F_FW_CMD_WRITE |
4055 				     V_FW_CMD_EXEC(free) |
4056 				     V_FW_VI_MAC_CMD_VIID(viid));
4057 		c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
4058 					    V_FW_CMD_LEN16(len16));
4059 
4060 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4061 			p->valid_to_idx = htons(
4062 				F_FW_VI_MAC_CMD_VALID |
4063 				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
4064 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
4065 		}
4066 
4067 		/*
4068 		 * It's okay if we run out of space in our MAC address arena.
4069 		 * Some of the addresses we submit may get stored so we need
4070 		 * to run through the reply to see what the results were ...
4071 		 */
4072 		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
4073 		if (ret && ret != -FW_ENOMEM)
4074 			break;
4075 
4076 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
4077 			u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4078 
4079 			if (idx)
4080 				idx[offset+i] = (index >= FW_CLS_TCAM_NUM_ENTRIES
4081 						 ? 0xffff
4082 						 : index);
4083 			if (index < FW_CLS_TCAM_NUM_ENTRIES)
4084 				nfilters++;
4085 			else if (hash)
4086 				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
4087 		}
4088 
4089 		free = false;
4090 		offset += fw_naddr;
4091 		rem -= fw_naddr;
4092 	}
4093 
4094 	if (ret == 0 || ret == -FW_ENOMEM)
4095 		ret = nfilters;
4096 	return ret;
4097 }
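/*
 * Illustrative usage, a sketch only ("mac0" and "mac1" are hypothetical
 * addresses): install exact-match filters for two unicast addresses,
 * falling back to the inexact hash for any that don't fit.
 *
 *	const u8 *addrs[2] = { mac0, mac1 };
 *	u16 idx[2];
 *	u64 hash = 0;
 *	int n = t4_alloc_mac_filt(sc, sc->mbox, viid, false, 2, addrs, idx,
 *				  &hash, true);
 *	// n is the number of exact filters allocated; if hash != 0 the
 *	// remainder should be programmed via t4_set_addr_hash().
 */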
4098 
4099 /**
4100  *	t4_change_mac - modifies the exact-match filter for a MAC address
4101  *	@adap: the adapter
4102  *	@mbox: mailbox to use for the FW command
4103  *	@viid: the VI id
4104  *	@idx: index of existing filter for old value of MAC address, or -1
4105  *	@addr: the new MAC address value
4106  *	@persist: whether a new MAC allocation should be persistent
4107  *	@add_smt: if true also add the address to the HW SMT
4108  *
4109  *	Modifies an exact-match filter and sets it to the new MAC address if
4110  *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
4111  *	latter case the address is added persistently if @persist is %true.
4112  *
4113  *	Note that in general it is not possible to modify the value of a given
4114  *	filter so the generic way to modify an address filter is to free the one
4115  *	being used by the old address value and allocate a new filter for the
4116  *	new address value.
4117  *
4118  *	Returns a negative error number or the index of the filter with the new
4119  *	MAC value.  Note that this index may differ from @idx.
4120  */
4121 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
4122 		  int idx, const u8 *addr, bool persist, bool add_smt)
4123 {
4124 	int ret, mode;
4125 	struct fw_vi_mac_cmd c;
4126 	struct fw_vi_mac_exact *p = c.u.exact;
4127 
4128 	if (idx < 0)                             /* new allocation */
4129 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
4130 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
4131 
4132 	memset(&c, 0, sizeof(c));
4133 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4134 			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
4135 	c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
4136 	p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
4137 				V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
4138 				V_FW_VI_MAC_CMD_IDX(idx));
4139 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
4140 
4141 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4142 	if (ret == 0) {
4143 		ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4144 		if (ret >= FW_CLS_TCAM_NUM_ENTRIES)
4145 			ret = -ENOMEM;
4146 	}
4147 	return ret;
4148 }
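/*
 * Illustrative usage, a sketch only: install a new persistent MAC address
 * on a VI without touching the SMT; idx == -1 asks FW to pick a free
 * filter slot, and the slot actually used is returned.
 *
 *	int new_idx = t4_change_mac(sc, sc->mbox, viid, -1, mac, true, false);
 */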
4149 
4150 /**
4151  *	t4_set_addr_hash - program the MAC inexact-match hash filter
4152  *	@adap: the adapter
4153  *	@mbox: mailbox to use for the FW command
4154  *	@viid: the VI id
4155  *	@ucast: whether the hash filter should also match unicast addresses
4156  *	@vec: the value to be written to the hash filter
4157  *	@sleep_ok: call is allowed to sleep
4158  *
4159  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
4160  */
4161 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
4162 		     bool ucast, u64 vec, bool sleep_ok)
4163 {
4164 	struct fw_vi_mac_cmd c;
4165 
4166 	memset(&c, 0, sizeof(c));
4167 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4168 			     F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid));
4169 	c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
4170 				    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
4171 				    V_FW_CMD_LEN16(1));
4172 	c.u.hash.hashvec = cpu_to_be64(vec);
4173 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4174 }
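/*
 * Illustrative usage, a sketch only: program the 64-bit hash vector
 * accumulated by t4_alloc_mac_filt() above, matching multicast addresses
 * only (ucast == false).
 *
 *	int ret = t4_set_addr_hash(sc, sc->mbox, viid, false, hash, true);
 */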
4175 
4176 /**
4177  *	t4_enable_vi - enable/disable a virtual interface
4178  *	@adap: the adapter
4179  *	@mbox: mailbox to use for the FW command
4180  *	@viid: the VI id
4181  *	@rx_en: 1=enable Rx, 0=disable Rx
4182  *	@tx_en: 1=enable Tx, 0=disable Tx
4183  *
4184  *	Enables/disables a virtual interface.
4185  */
4186 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4187 		 bool rx_en, bool tx_en)
4188 {
4189 	struct fw_vi_enable_cmd c;
4190 
4191 	memset(&c, 0, sizeof(c));
4192 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4193 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4194 	c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
4195 			       V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
4196 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4197 }
4198 
4199 /**
4200  *	t4_identify_port - identify a VI's port by blinking its LED
4201  *	@adap: the adapter
4202  *	@mbox: mailbox to use for the FW command
4203  *	@viid: the VI id
4204  *	@nblinks: how many times to blink LED at 2.5 Hz
4205  *
4206  *	Identifies a VI's port by blinking its LED.
4207  */
4208 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
4209 		     unsigned int nblinks)
4210 {
4211 	struct fw_vi_enable_cmd c;
4212 
4213 	memset(&c, 0, sizeof(c));
4214 	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4215 			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4216 	c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
4217 	c.blinkdur = htons(nblinks);
4218 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4219 }
4220 
4221 /**
4222  *	t4_iq_start_stop - enable/disable an ingress queue and its FLs
4223  *	@adap: the adapter
4224  *	@mbox: mailbox to use for the FW command
4225  *	@start: %true to enable the queues, %false to disable them
4226  *	@pf: the PF owning the queues
4227  *	@vf: the VF owning the queues
4228  *	@iqid: ingress queue id
4229  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
4230  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
4231  *
4232  *	Starts or stops an ingress queue and its associated FLs, if any.
4233  */
4234 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
4235 		     unsigned int pf, unsigned int vf, unsigned int iqid,
4236 		     unsigned int fl0id, unsigned int fl1id)
4237 {
4238 	struct fw_iq_cmd c;
4239 
4240 	memset(&c, 0, sizeof(c));
4241 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4242 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4243 			    V_FW_IQ_CMD_VFN(vf));
4244 	c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
4245 				 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
4246 	c.iqid = htons(iqid);
4247 	c.fl0id = htons(fl0id);
4248 	c.fl1id = htons(fl1id);
4249 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4250 }
4251 
4252 /**
4253  *	t4_iq_free - free an ingress queue and its FLs
4254  *	@adap: the adapter
4255  *	@mbox: mailbox to use for the FW command
4256  *	@pf: the PF owning the queues
4257  *	@vf: the VF owning the queues
4258  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
4259  *	@iqid: ingress queue id
4260  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
4261  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
4262  *
4263  *	Frees an ingress queue and its associated FLs, if any.
4264  */
4265 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4266 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
4267 	       unsigned int fl0id, unsigned int fl1id)
4268 {
4269 	struct fw_iq_cmd c;
4270 
4271 	memset(&c, 0, sizeof(c));
4272 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4273 			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4274 			    V_FW_IQ_CMD_VFN(vf));
4275 	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
4276 	c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
4277 	c.iqid = htons(iqid);
4278 	c.fl0id = htons(fl0id);
4279 	c.fl1id = htons(fl1id);
4280 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4281 }
4282 
4283 /**
4284  *	t4_eth_eq_free - free an Ethernet egress queue
4285  *	@adap: the adapter
4286  *	@mbox: mailbox to use for the FW command
4287  *	@pf: the PF owning the queue
4288  *	@vf: the VF owning the queue
4289  *	@eqid: egress queue id
4290  *
4291  *	Frees an Ethernet egress queue.
4292  */
4293 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4294 		   unsigned int vf, unsigned int eqid)
4295 {
4296 	struct fw_eq_eth_cmd c;
4297 
4298 	memset(&c, 0, sizeof(c));
4299 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
4300 			    F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
4301 			    V_FW_EQ_ETH_CMD_VFN(vf));
4302 	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
4303 	c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
4304 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4305 }
4306 
4307 /**
4308  *	t4_ctrl_eq_free - free a control egress queue
4309  *	@adap: the adapter
4310  *	@mbox: mailbox to use for the FW command
4311  *	@pf: the PF owning the queue
4312  *	@vf: the VF owning the queue
4313  *	@eqid: egress queue id
4314  *
4315  *	Frees a control egress queue.
4316  */
4317 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4318 		    unsigned int vf, unsigned int eqid)
4319 {
4320 	struct fw_eq_ctrl_cmd c;
4321 
4322 	memset(&c, 0, sizeof(c));
4323 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
4324 			    F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
4325 			    V_FW_EQ_CTRL_CMD_VFN(vf));
4326 	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
4327 	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
4328 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4329 }
4330 
4331 /**
4332  *	t4_ofld_eq_free - free an offload egress queue
4333  *	@adap: the adapter
4334  *	@mbox: mailbox to use for the FW command
4335  *	@pf: the PF owning the queue
4336  *	@vf: the VF owning the queue
4337  *	@eqid: egress queue id
4338  *
4339  *	Frees an offload egress queue.
4340  */
4341 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4342 		    unsigned int vf, unsigned int eqid)
4343 {
4344 	struct fw_eq_ofld_cmd c;
4345 
4346 	memset(&c, 0, sizeof(c));
4347 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
4348 			    F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
4349 			    V_FW_EQ_OFLD_CMD_VFN(vf));
4350 	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
4351 	c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
4352 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4353 }
4354 
4355 /**
4356  *	t4_handle_fw_rpl - process a FW reply message
4357  *	@adap: the adapter
4358  *	@rpl: start of the FW message
4359  *
4360  *	Processes a FW message, such as link state change messages.
4361  */
4362 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
4363 {
4364 	u8 opcode = *(const u8 *)rpl;
4365 
4366 	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
4367 		int speed = 0, fc = 0, i;
4368 		const struct fw_port_cmd *p = (const void *)rpl;
4369 		int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
4370 		struct port_info *pi = NULL;
4371 		struct link_config *lc;
4372 		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
4373 		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
4374 		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
4375 
4376 		if (stat & F_FW_PORT_CMD_RXPAUSE)
4377 			fc |= PAUSE_RX;
4378 		if (stat & F_FW_PORT_CMD_TXPAUSE)
4379 			fc |= PAUSE_TX;
4380 		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
4381 			speed = SPEED_100;
4382 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
4383 			speed = SPEED_1000;
4384 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
4385 			speed = SPEED_10000;
4386 
4387 		for_each_port(adap, i) {
4388 			pi = adap2pinfo(adap, i);
4389 			if (pi->tx_chan == chan)
4390 				break;
4391 		}
4392 		lc = &pi->link_cfg;
4393 
4394 		if (link_ok != lc->link_ok || speed != lc->speed ||
4395 		    fc != lc->fc) {                    /* something changed */
4396 			lc->link_ok = link_ok;
4397 			lc->speed = speed;
4398 			lc->fc = fc;
4399 			t4_os_link_changed(adap, i, link_ok);
4400 		}
4401 		if (mod != pi->mod_type) {
4402 			pi->mod_type = mod;
4403 			t4_os_portmod_changed(adap, i);
4404 		}
4405 	}
4406 	return 0;
4407 }
4408 
4409 /**
4410  *	get_pci_mode - determine a card's PCI mode
4411  *	@adapter: the adapter
4412  *	@p: where to store the PCI settings
4413  *
4414  *	Determines a card's PCI mode and associated parameters, such as speed
4415  *	and width.
4416  */
4417 static void __devinit get_pci_mode(struct adapter *adapter,
4418 				   struct pci_params *p)
4419 {
4420 	u16 val;
4421 	u32 pcie_cap;
4422 
4423 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4424 	if (pcie_cap) {
4425 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
4426 		p->speed = val & PCI_EXP_LNKSTA_CLS;
4427 		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
4428 	}
4429 }
4430 
4431 /**
4432  *	init_link_config - initialize a link's SW state
4433  *	@lc: structure holding the link state
4434  *	@caps: link capabilities
4435  *
4436  *	Initializes the SW state maintained for each link, including the link's
4437  *	capabilities and default speed/flow-control/autonegotiation settings.
4438  */
4439 static void __devinit init_link_config(struct link_config *lc,
4440 				       unsigned int caps)
4441 {
4442 	lc->supported = caps;
4443 	lc->requested_speed = 0;
4444 	lc->speed = 0;
4445 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
4446 	if (lc->supported & FW_PORT_CAP_ANEG) {
4447 		lc->advertising = lc->supported & ADVERT_MASK;
4448 		lc->autoneg = AUTONEG_ENABLE;
4449 		lc->requested_fc |= PAUSE_AUTONEG;
4450 	} else {
4451 		lc->advertising = 0;
4452 		lc->autoneg = AUTONEG_DISABLE;
4453 	}
4454 }
4455 
4456 static int __devinit wait_dev_ready(struct adapter *adap)
4457 {
4458 	u32 whoami;
4459 
4460 	whoami = t4_read_reg(adap, A_PL_WHOAMI);
4461 
4462 	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
4463 		return 0;
4464 
4465 	msleep(500);
4466 	whoami = t4_read_reg(adap, A_PL_WHOAMI);
4467 	return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
4468 		? 0 : -EIO);
4469 }
4470 
4471 static int __devinit get_flash_params(struct adapter *adapter)
4472 {
4473 	int ret;
4474 	u32 info = 0;
4475 
4476 	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
4477 	if (!ret)
4478 		ret = sf1_read(adapter, 3, 0, 1, &info);
4479 	t4_write_reg(adapter, A_SF_OP, 0);               /* unlock SF */
4480 	if (ret < 0)
4481 		return ret;
4482 
4483 	if ((info & 0xff) != 0x20)             /* not a Numonyx flash */
4484 		return -EINVAL;
4485 	info >>= 16;                           /* log2 of size */
4486 	if (info >= 0x14 && info < 0x18)
4487 		adapter->params.sf_nsec = 1 << (info - 16);
4488 	else if (info == 0x18)
4489 		adapter->params.sf_nsec = 64;
4490 	else
4491 		return -EINVAL;
4492 	adapter->params.sf_size = 1 << info;
4493 	return 0;
4494 }
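/*
 * Worked decode of the logic above, assuming a part that reports
 * manufacturer ID 0x20 and a density byte of 0x17 (log2 of the size in
 * bytes): sf_size becomes 1 << 0x17 = 8 MB and sf_nsec becomes
 * 1 << (0x17 - 16) = 128 sectors of 64 KB each.
 */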
4495 
4496 /**
4497  *	t4_prep_adapter - prepare SW and HW for operation
4498  *	@adapter: the adapter
4500  *
4501  *	Initialize adapter SW state for the various HW modules, set initial
4502  *	values for some adapter tunables, take PHYs out of reset, and
4503  *	initialize the MDIO interface.
4504  */
4505 int __devinit t4_prep_adapter(struct adapter *adapter)
4506 {
4507 	int ret;
4508 
4509 	ret = wait_dev_ready(adapter);
4510 	if (ret < 0)
4511 		return ret;
4512 
4513 	get_pci_mode(adapter, &adapter->params.pci);
4514 
4515 	adapter->params.rev = t4_read_reg(adapter, A_PL_REV);
4516 	adapter->params.pci.vpd_cap_addr =
4517 		t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
4518 
4519 	ret = get_flash_params(adapter);
4520 	if (ret < 0)
4521 		return ret;
4522 
4523 	ret = get_vpd_params(adapter, &adapter->params.vpd);
4524 	if (ret < 0)
4525 		return ret;
4526 
4527 	if (t4_read_reg(adapter, A_SGE_PC0_REQ_BIST_CMD) != 0xffffffff) {
4528 		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
4529 	} else {
4530 		adapter->params.cim_la_size = CIMLA_SIZE;
4531 	}
4532 
4533 	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4534 
4535 	/*
4536 	 * Default port and clock for debugging in case we can't reach FW.
4537 	 */
4538 	adapter->params.nports = 1;
4539 	adapter->params.portvec = 1;
4540 	adapter->params.vpd.cclk = 50000;
4541 
4542 	return 0;
4543 }
4544 
4545 int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
4546 {
4547 	u8 addr[6];
4548 	int ret, i, j;
4549 	struct fw_port_cmd c;
4550 	unsigned int rss_size;
4551 	adapter_t *adap = p->adapter;
4552 
4553 	memset(&c, 0, sizeof(c));
4554 
4555 	for (i = 0, j = -1; i <= p->port_id; i++) {
4556 		do {
4557 			j++;
4558 		} while ((adap->params.portvec & (1 << j)) == 0);
4559 	}
4560 
4561 	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
4562 			       F_FW_CMD_REQUEST | F_FW_CMD_READ |
4563 			       V_FW_PORT_CMD_PORTID(j));
4564 	c.action_to_len16 = htonl(
4565 		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
4566 		FW_LEN16(c));
4567 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4568 	if (ret)
4569 		return ret;
4570 
4571 	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
4572 	if (ret < 0)
4573 		return ret;
4574 
4575 	p->viid = ret;
4576 	p->tx_chan = j;
4577 	p->lport = j;
4578 	p->rss_size = rss_size;
4579 	t4_os_set_hw_addr(adap, p->port_id, addr);
4580 
4581 	ret = ntohl(c.u.info.lstatus_to_modtype);
4582 	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
4583 		G_FW_PORT_CMD_MDIOADDR(ret) : -1;
4584 	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
4585 	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
4586 
4587 	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
4588 
4589 	return 0;
4590 }
4591