1 /*	$NetBSD: pdq.c,v 1.40 2013/09/15 09:26:39 martin Exp $	*/
2 
3 /*-
4  * Copyright (c) 1995,1996 Matt Thomas <matt@3am-software.com>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. The name of the author may not be used to endorse or promote products
13  *    derived from this software without specific prior written permission
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  * Id: pdq.c,v 1.32 1997/06/05 01:56:35 thomas Exp
27  *
28  */
29 
30 /*
31  * DEC PDQ FDDI Controller O/S independent code
32  *
33  * This module should work on any PDQ-based board.  Note that changes for
34  * MIPS and Alpha architectures (or any other architecture which requires
35  * a flushing of memory or write buffers and/or has incoherent caches)
36  * have yet to be made.
37  *
38  * However, it is expected that the PDQ_CSR_WRITE macro will cause a
39  * flushing of the write buffers.
40  */
41 
42 #include <sys/cdefs.h>
43 __KERNEL_RCSID(0, "$NetBSD: pdq.c,v 1.40 2013/09/15 09:26:39 martin Exp $");
44 
45 #define	PDQ_HWSUPPORT	/* for pdq.h */
46 
47 #if defined(__FreeBSD__)
48 /*
49  * What a botch having to use specific includes for FreeBSD!
50  */
51 #include <dev/pdq/pdqvar.h>
52 #include <dev/pdq/pdqreg.h>
53 #else
54 #include "pdqvar.h"
55 #include "pdqreg.h"
56 #endif
57 
58 #define	PDQ_ROUNDUP(n, x)	(((n) + ((x) - 1)) & ~((x) - 1))
59 #define	PDQ_CMD_RX_ALIGNMENT	16
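/*
 * PDQ_ROUNDUP(n, x) rounds n up to the next multiple of x, where x must be a
 * power of two; e.g. PDQ_ROUNDUP(20, 16) == 32 and PDQ_ROUNDUP(16, 16) == 16.
 * Command and response buffer sizes are rounded up to PDQ_CMD_RX_ALIGNMENT.
 */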
60 
61 #if (defined(PDQTEST) && !defined(PDQ_NOPRINTF)) || defined(PDQVERBOSE)
62 #define	PDQ_PRINTF(x)	printf x
63 #else
64 #define	PDQ_PRINTF(x)	do { } while (0)
65 #endif
66 
67 static const char * const pdq_halt_codes[] = {
68     "Selftest Timeout", "Host Bus Parity Error", "Host Directed Fault",
69     "Software Fault", "Hardware Fault", "PC Trace Path Test",
70     "DMA Error", "Image CRC Error", "Adapter Processer Error"
71 };
72 
73 static const char * const pdq_adapter_states[] = {
74     "Reset", "Upgrade", "DMA Unavailable", "DMA Available",
75     "Link Available", "Link Unavailable", "Halted", "Ring Member"
76 };
77 
78 /*
79  * The following are used in conjunction with
80  * unsolicited events
81  */
82 static const char * const pdq_entities[] = {
83     "Station", "Link", "Phy Port"
84 };
85 
86 static const char * const pdq_station_events[] = {
87     "Unknown Event #0",
88     "Trace Received"
89 };
90 
91 static const char * const pdq_link_events[] = {
92     "Transmit Underrun",
93     "Transmit Failed",
94     "Block Check Error (CRC)",
95     "Frame Status Error",
96     "PDU Length Error",
97     NULL,
98     NULL,
99     "Receive Data Overrun",
100     NULL,
101     "No User Buffer",
102     "Ring Initialization Initiated",
103     "Ring Initialization Received",
104     "Ring Beacon Initiated",
105     "Duplicate Address Failure",
106     "Duplicate Token Detected",
107     "Ring Purger Error",
108     "FCI Strip Error",
109     "Trace Initiated",
110     "Directed Beacon Received",
111 };
112 
113 #if 0
114 static const char * const pdq_station_arguments[] = {
115     "Reason"
116 };
117 
118 static const char * const pdq_link_arguments[] = {
119     "Reason",
120     "Data Link Header",
121     "Source",
122     "Upstream Neighbor"
123 };
124 
125 static const char * const pdq_phy_arguments[] = {
126     "Direction"
127 };
128 
129 static const char * const * const pdq_event_arguments[] = {
130     pdq_station_arguments,
131     pdq_link_arguments,
132     pdq_phy_arguments
133 };
134 
135 #endif
136 
137 
138 static const char * const pdq_phy_events[] = {
139     "LEM Error Monitor Reject",
140     "Elasticy Buffer Error",
141     "Link Confidence Test Reject"
142 };
143 
144 static const char * const * const pdq_event_codes[] = {
145     pdq_station_events,
146     pdq_link_events,
147     pdq_phy_events
148 };
149 
150 static const char * const pdq_station_types[] = {
151     "SAS", "DAC", "SAC", "NAC", "DAS"
152 };
153 
154 static const char * const pdq_smt_versions[] = { "", "V6.2", "V7.2", "V7.3" };
155 
156 static const char pdq_phy_types[] = "ABSM";
157 
158 static const char * const pdq_pmd_types0[] = {
159     "ANSI Multi-Mode", "ANSI Single-Mode Type 1", "ANSI Single-Mode Type 2",
160     "ANSI Sonet"
161 };
162 
163 static const char * const pdq_pmd_types100[] = {
164     "Low Power", "Thin Wire", "Shielded Twisted Pair",
165     "Unshielded Twisted Pair"
166 };
167 
168 static const char * const * const pdq_pmd_types[] = {
169     pdq_pmd_types0, pdq_pmd_types100
170 };
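
/*
 * PMD type codes below 100 index pdq_pmd_types0 and codes of 100 or above
 * index pdq_pmd_types100, hence the
 * pdq_pmd_types[pmd_type / 100][pmd_type % 100] lookups in
 * pdq_print_fddi_chars() below.
 */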
171 
172 static const char * const pdq_descriptions[] = {
173     "DEFPA PCI",
174     "DEFEA EISA",
175     "DEFTA TC",
176     "DEFAA Futurebus",
177     "DEFQA Q-bus",
178 };
179 
180 static void
181 pdq_print_fddi_chars(
182     pdq_t *pdq,
183     const pdq_response_status_chars_get_t *rsp)
184 {
185     pdq_uint32_t phy_type;
186     pdq_uint32_t pmd_type;
187     pdq_uint32_t smt_version_id;
188     pdq_station_type_t station_type;
189 
190     printf(
191 #if !defined(__bsdi__) && !defined(__NetBSD__)
192 	   PDQ_OS_PREFIX
193 #else
194 	   ": "
195 #endif
196 	   "DEC %s FDDI %s Controller\n",
197 #if !defined(__bsdi__) && !defined(__NetBSD__)
198 	   PDQ_OS_PREFIX_ARGS,
199 #endif
200 	   pdq_descriptions[pdq->pdq_type],
201 	   pdq_station_types[le32toh(rsp->status_chars_get.station_type)]);
202 
203     printf(PDQ_OS_PREFIX "FDDI address %c%c:%c%c:%c%c:%c%c:%c%c:%c%c, FW=%c%c%c%c, HW=%c",
204 	   PDQ_OS_PREFIX_ARGS,
205 	   hexdigits[pdq->pdq_hwaddr.lanaddr_bytes[0] >> 4],
206 	   hexdigits[pdq->pdq_hwaddr.lanaddr_bytes[0] & 0x0F],
207 	   hexdigits[pdq->pdq_hwaddr.lanaddr_bytes[1] >> 4],
208 	   hexdigits[pdq->pdq_hwaddr.lanaddr_bytes[1] & 0x0F],
209 	   hexdigits[pdq->pdq_hwaddr.lanaddr_bytes[2] >> 4],
210 	   hexdigits[pdq->pdq_hwaddr.lanaddr_bytes[2] & 0x0F],
211 	   hexdigits[pdq->pdq_hwaddr.lanaddr_bytes[3] >> 4],
212 	   hexdigits[pdq->pdq_hwaddr.lanaddr_bytes[3] & 0x0F],
213 	   hexdigits[pdq->pdq_hwaddr.lanaddr_bytes[4] >> 4],
214 	   hexdigits[pdq->pdq_hwaddr.lanaddr_bytes[4] & 0x0F],
215 	   hexdigits[pdq->pdq_hwaddr.lanaddr_bytes[5] >> 4],
216 	   hexdigits[pdq->pdq_hwaddr.lanaddr_bytes[5] & 0x0F],
217 	   pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
218 	   pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3],
219 	   rsp->status_chars_get.module_rev.fwrev_bytes[0]);
220 
221     phy_type = le32toh(rsp->status_chars_get.phy_type[0]);
222     pmd_type = le32toh(rsp->status_chars_get.pmd_type[0]);
223     station_type = le32toh(rsp->status_chars_get.station_type);
224     smt_version_id = le32toh(rsp->status_chars_get.smt_version_id);
225 
226     if (smt_version_id < PDQ_ARRAY_SIZE(pdq_smt_versions))
227 	printf(", SMT %s\n", pdq_smt_versions[smt_version_id]);
228 
229     printf(PDQ_OS_PREFIX "FDDI Port%s = %c (PMD = %s)",
230 	   PDQ_OS_PREFIX_ARGS,
231 	   station_type == PDQ_STATION_TYPE_DAS ? "[A]" : "",
232 	   pdq_phy_types[phy_type],
233 	   pdq_pmd_types[pmd_type / 100][pmd_type % 100]);
234 
235     if (station_type == PDQ_STATION_TYPE_DAS) {
236 	phy_type = le32toh(rsp->status_chars_get.phy_type[1]);
237 	pmd_type = le32toh(rsp->status_chars_get.pmd_type[1]);
238 	printf(", FDDI Port[B] = %c (PMD = %s)",
239 	       pdq_phy_types[phy_type],
240 	       pdq_pmd_types[pmd_type / 100][pmd_type % 100]);
241     }
242 
243     printf("\n");
244 
245     pdq_os_update_status(pdq, rsp);
246 }
247 
248 static void
249 pdq_init_csrs(
250     pdq_csrs_t *csrs,
251     pdq_bus_t bus,
252     pdq_bus_memaddr_t csr_base,
253     size_t csrsize)
254 {
255     csrs->csr_bus = bus;
256     csrs->csr_base = csr_base;
257     csrs->csr_port_reset		= PDQ_CSR_OFFSET(csr_base,  0 * csrsize);
258     csrs->csr_host_data			= PDQ_CSR_OFFSET(csr_base,  1 * csrsize);
259     csrs->csr_port_control		= PDQ_CSR_OFFSET(csr_base,  2 * csrsize);
260     csrs->csr_port_data_a		= PDQ_CSR_OFFSET(csr_base,  3 * csrsize);
261     csrs->csr_port_data_b		= PDQ_CSR_OFFSET(csr_base,  4 * csrsize);
262     csrs->csr_port_status		= PDQ_CSR_OFFSET(csr_base,  5 * csrsize);
263     csrs->csr_host_int_type_0		= PDQ_CSR_OFFSET(csr_base,  6 * csrsize);
264     csrs->csr_host_int_enable		= PDQ_CSR_OFFSET(csr_base,  7 * csrsize);
265     csrs->csr_type_2_producer		= PDQ_CSR_OFFSET(csr_base,  8 * csrsize);
266     csrs->csr_cmd_response_producer	= PDQ_CSR_OFFSET(csr_base, 10 * csrsize);
267     csrs->csr_cmd_request_producer	= PDQ_CSR_OFFSET(csr_base, 11 * csrsize);
268     csrs->csr_host_smt_producer		= PDQ_CSR_OFFSET(csr_base, 12 * csrsize);
269     csrs->csr_unsolicited_producer	= PDQ_CSR_OFFSET(csr_base, 13 * csrsize);
270 }
271 
272 static void
273 pdq_init_pci_csrs(
274     pdq_pci_csrs_t *csrs,
275     pdq_bus_t bus,
276     pdq_bus_memaddr_t csr_base,
277     size_t csrsize)
278 {
279     csrs->csr_bus = bus;
280     csrs->csr_base = csr_base;
281     csrs->csr_pfi_mode_control	= PDQ_CSR_OFFSET(csr_base, 16 * csrsize);
282     csrs->csr_pfi_status	= PDQ_CSR_OFFSET(csr_base, 17 * csrsize);
283     csrs->csr_fifo_write	= PDQ_CSR_OFFSET(csr_base, 18 * csrsize);
284     csrs->csr_fifo_read		= PDQ_CSR_OFFSET(csr_base, 19 * csrsize);
285 }
286 
287 static void
288 pdq_flush_databuf_queue(
289     pdq_t *pdq,
290     pdq_databuf_queue_t *q)
291 {
292     PDQ_OS_DATABUF_T *pdu;
293     for (;;) {
294 	PDQ_OS_DATABUF_DEQUEUE(q, pdu);
295 	if (pdu == NULL)
296 	    return;
297 	PDQ_OS_DATABUF_FREE(pdq, pdu);
298     }
299 }
300 
301 static pdq_boolean_t
302 pdq_do_port_control(
303     const pdq_csrs_t * const csrs,
304     pdq_uint32_t cmd)
305 {
306     int cnt = 0;
307     PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
308     PDQ_CSR_WRITE(csrs, csr_port_control, PDQ_PCTL_CMD_ERROR | cmd);
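    /* Busy-wait (bounded) for the adapter to acknowledge the port control command. */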
309     while ((PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) == 0 && cnt < 33000000)
310 	cnt++;
311     PDQ_PRINTF(("CSR cmd spun %d times\n", cnt));
312     if (PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) {
313 	PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
314 	return (PDQ_CSR_READ(csrs, csr_port_control) & PDQ_PCTL_CMD_ERROR) ? PDQ_FALSE : PDQ_TRUE;
315     }
316     /* adapter failure */
317     PDQ_ASSERT(0);
318     return PDQ_FALSE;
319 }
320 
321 static void
322 pdq_read_mla(
323     const pdq_csrs_t * const csrs,
324     pdq_lanaddr_t *hwaddr)
325 {
326     pdq_uint32_t data;
327 
328     PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
329     pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
330     data = PDQ_CSR_READ(csrs, csr_host_data);
331 
332     hwaddr->lanaddr_bytes[0] = (data >> 0) & 0xFF;
333     hwaddr->lanaddr_bytes[1] = (data >> 8) & 0xFF;
334     hwaddr->lanaddr_bytes[2] = (data >> 16) & 0xFF;
335     hwaddr->lanaddr_bytes[3] = (data >> 24) & 0xFF;
336 
337     PDQ_CSR_WRITE(csrs, csr_port_data_a, 1);
338     pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
339     data = PDQ_CSR_READ(csrs, csr_host_data);
340 
341     hwaddr->lanaddr_bytes[4] = (data >> 0) & 0xFF;
342     hwaddr->lanaddr_bytes[5] = (data >> 8) & 0xFF;
343 }
344 
345 static void
346 pdq_read_fwrev(
347     const pdq_csrs_t * const csrs,
348     pdq_fwrev_t *fwrev)
349 {
350     pdq_uint32_t data;
351 
352     pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ);
353     data = PDQ_CSR_READ(csrs, csr_host_data);
354 
355     fwrev->fwrev_bytes[3] = (data >> 0) & 0xFF;
356     fwrev->fwrev_bytes[2] = (data >> 8) & 0xFF;
357     fwrev->fwrev_bytes[1] = (data >> 16) & 0xFF;
358     fwrev->fwrev_bytes[0] = (data >> 24) & 0xFF;
359 }
360 
361 static pdq_boolean_t
362 pdq_read_error_log(
363     pdq_t *pdq,
364     pdq_response_error_log_get_t *log_entry)
365 {
366     const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
367     pdq_uint32_t *ptr = (pdq_uint32_t *) log_entry;
368 
369     pdq_do_port_control(csrs, PDQ_PCTL_ERROR_LOG_START);
370 
371     while (pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ) == PDQ_TRUE) {
372 	*ptr++ = PDQ_CSR_READ(csrs, csr_host_data);
373 	if ((pdq_uint8_t *) ptr - (pdq_uint8_t *) log_entry == sizeof(*log_entry))
374 	    break;
375     }
376     return (ptr == (pdq_uint32_t *) log_entry) ? PDQ_FALSE : PDQ_TRUE;
377 }
378 
379 static pdq_chip_rev_t
380 pdq_read_chiprev(
381     const pdq_csrs_t * const csrs)
382 {
383     pdq_uint32_t data;
384 
385     PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_PDQ_REV_GET);
386     pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
387     data = PDQ_CSR_READ(csrs, csr_host_data);
388 
389     return (pdq_chip_rev_t) data;
390 }
391 
392 static const struct {
393     size_t cmd_len;
394     size_t rsp_len;
395     const char *cmd_name;
396 } pdq_cmd_info[] = {
397     { sizeof(pdq_cmd_generic_t),		/* 0 - PDQC_START */
398       sizeof(pdq_response_generic_t),
399       "Start"
400     },
401     { sizeof(pdq_cmd_filter_set_t),		/* 1 - PDQC_FILTER_SET */
402       sizeof(pdq_response_generic_t),
403       "Filter Set"
404     },
405     { sizeof(pdq_cmd_generic_t),		/* 2 - PDQC_FILTER_GET */
406       sizeof(pdq_response_filter_get_t),
407       "Filter Get"
408     },
409     { sizeof(pdq_cmd_chars_set_t),		/* 3 - PDQC_CHARS_SET */
410       sizeof(pdq_response_generic_t),
411       "Chars Set"
412     },
413     { sizeof(pdq_cmd_generic_t),		/* 4 - PDQC_STATUS_CHARS_GET */
414       sizeof(pdq_response_status_chars_get_t),
415       "Status Chars Get"
416     },
417 #if 0
418     { sizeof(pdq_cmd_generic_t),		/* 5 - PDQC_COUNTERS_GET */
419       sizeof(pdq_response_counters_get_t),
420       "Counters Get"
421     },
422     { sizeof(pdq_cmd_counters_set_t),		/* 6 - PDQC_COUNTERS_SET */
423       sizeof(pdq_response_generic_t),
424       "Counters Set"
425     },
426 #else
427     { 0, 0, "Counters Get" },
428     { 0, 0, "Counters Set" },
429 #endif
430     { sizeof(pdq_cmd_addr_filter_set_t),	/* 7 - PDQC_ADDR_FILTER_SET */
431       sizeof(pdq_response_generic_t),
432       "Addr Filter Set"
433     },
434     { sizeof(pdq_cmd_generic_t),		/* 8 - PDQC_ADDR_FILTER_GET */
435       sizeof(pdq_response_addr_filter_get_t),
436       "Addr Filter Get"
437     },
438     { sizeof(pdq_cmd_generic_t),		/* 9 - PDQC_ERROR_LOG_CLEAR */
439       sizeof(pdq_response_generic_t),
440       "Error Log Clear"
441     },
442     { sizeof(pdq_cmd_generic_t),		/* 10 - PDQC_ERROR_LOG_SET */
443       sizeof(pdq_response_generic_t),
444       "Error Log Set"
445     },
446     { sizeof(pdq_cmd_generic_t),		/* 11 - PDQC_FDDI_MIB_GET */
447       sizeof(pdq_response_generic_t),
448       "FDDI MIB Get"
449     },
450     { sizeof(pdq_cmd_generic_t),		/* 12 - PDQC_DEC_EXT_MIB_GET */
451       sizeof(pdq_response_generic_t),
452       "DEC Ext MIB Get"
453     },
454     { sizeof(pdq_cmd_generic_t),		/* 13 - PDQC_DEC_SPECIFIC_GET */
455       sizeof(pdq_response_generic_t),
456       "DEC Specific Get"
457     },
458     { sizeof(pdq_cmd_generic_t),		/* 14 - PDQC_SNMP_SET */
459       sizeof(pdq_response_generic_t),
460       "SNMP Set"
461     },
462     { 0, 0, "N/A" },
463     { sizeof(pdq_cmd_generic_t),		/* 16 - PDQC_SMT_MIB_GET */
464       sizeof(pdq_response_generic_t),
465       "SMT MIB Get"
466     },
467     { sizeof(pdq_cmd_generic_t),		/* 17 - PDQC_SMT_MIB_SET */
468       sizeof(pdq_response_generic_t),
469       "SMT MIB Set",
470     },
471     { 0, 0, "Bogus CMD" },
472 };
473 
474 static void
475 pdq_queue_commands(
476     pdq_t *pdq)
477 {
478     const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
479     pdq_command_info_t * const ci = &pdq->pdq_command_info;
480     pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
481     pdq_txdesc_t * const txd = &dbp->pdqdb_command_requests[ci->ci_request_producer];
482     pdq_cmd_code_t op;
483     pdq_uint32_t cmdlen, rsplen, mask;
484 
485     /*
486      * If there are commands or responses active or there aren't
487      * any pending commands, then don't queue any more.
488      */
489     if (ci->ci_command_active || ci->ci_pending_commands == 0)
490 	return;
491 
492     /*
493      * Determine which command needs to be queued.
494      */
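    /* Scan downward from the highest command code; the first pending bit found wins. */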
495     op = PDQC_SMT_MIB_SET;
496     for (mask = 1 << ((int) op); (mask & ci->ci_pending_commands) == 0; mask >>= 1)
497 	op = (pdq_cmd_code_t) ((int) op - 1);
498     /*
499      * Obtain the sizes needed for the command and response.
500      * Round up to PDQ_CMD_RX_ALIGNMENT so the receive buffer is
501      * always properly aligned.
502      */
503     cmdlen = PDQ_ROUNDUP(pdq_cmd_info[op].cmd_len, PDQ_CMD_RX_ALIGNMENT);
504     rsplen = PDQ_ROUNDUP(pdq_cmd_info[op].rsp_len, PDQ_CMD_RX_ALIGNMENT);
505     if (cmdlen < rsplen)
506 	cmdlen = rsplen;
507     /*
508      * Since only one command at a time will be queued, there will always
509      * be enough space.
510      */
511 
512     /*
513      * Obtain and fill in the descriptor for the command (descriptor is
514      * pre-initialized)
515      */
516     txd->txd_pa_hi =
517 	htole32(PDQ_TXDESC_SEG_LEN(cmdlen)|PDQ_TXDESC_EOP|PDQ_TXDESC_SOP);
518 
519     /*
520      * Clear the command area, set the opcode, and clear the command from the
521      * pending mask.
522      */
523 
524     ci->ci_queued_commands[ci->ci_request_producer] = op;
525 #if defined(PDQVERBOSE)
526     ((pdq_response_generic_t *) ci->ci_response_bufstart)->generic_op =
527 	htole32(PDQC_BOGUS_CMD);
528 #endif
529     PDQ_OS_MEMZERO(ci->ci_request_bufstart, cmdlen);
530     *(pdq_uint32_t *) ci->ci_request_bufstart = htole32(op);
531     ci->ci_pending_commands &= ~mask;
532 
533     /*
534      * Fill in the command area, if needed.
535      */
536     switch (op) {
537 	case PDQC_FILTER_SET: {
538 	    pdq_cmd_filter_set_t *filter_set = (pdq_cmd_filter_set_t *) ci->ci_request_bufstart;
539 	    unsigned idx = 0;
540 	    filter_set->filter_set_items[idx].item_code =
541 		htole32(PDQI_IND_GROUP_PROM);
542 	    filter_set->filter_set_items[idx].filter_state =
543 		htole32(pdq->pdq_flags & PDQ_PROMISC ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
544 	    idx++;
545 	    filter_set->filter_set_items[idx].item_code =
546 		htole32(PDQI_GROUP_PROM);
547 	    filter_set->filter_set_items[idx].filter_state =
548 		htole32(pdq->pdq_flags & PDQ_ALLMULTI ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
549 	    idx++;
550 	    filter_set->filter_set_items[idx].item_code =
551 		htole32(PDQI_SMT_PROM);
552 	    filter_set->filter_set_items[idx].filter_state =
553 		htole32((pdq->pdq_flags & (PDQ_PROMISC|PDQ_PASS_SMT)) == (PDQ_PROMISC|PDQ_PASS_SMT) ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
554 	    idx++;
555 	    filter_set->filter_set_items[idx].item_code =
556 		htole32(PDQI_SMT_USER);
557 	    filter_set->filter_set_items[idx].filter_state =
558 		htole32((pdq->pdq_flags & PDQ_PASS_SMT) ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
559 	    idx++;
560 	    filter_set->filter_set_items[idx].item_code =
561 		htole32(PDQI_EOL);
562 	    break;
563 	}
564 	case PDQC_ADDR_FILTER_SET: {
565 	    pdq_cmd_addr_filter_set_t *addr_filter_set = (pdq_cmd_addr_filter_set_t *) ci->ci_request_bufstart;
566 	    pdq_lanaddr_t *addr = addr_filter_set->addr_filter_set_addresses;
567 	    addr->lanaddr_bytes[0] = 0xFF;
568 	    addr->lanaddr_bytes[1] = 0xFF;
569 	    addr->lanaddr_bytes[2] = 0xFF;
570 	    addr->lanaddr_bytes[3] = 0xFF;
571 	    addr->lanaddr_bytes[4] = 0xFF;
572 	    addr->lanaddr_bytes[5] = 0xFF;
573 	    addr++;
574 	    pdq_os_addr_fill(pdq, addr, 61);
575 	    break;
576 	}
577 	case PDQC_SNMP_SET: {
578 	    pdq_cmd_snmp_set_t *snmp_set = (pdq_cmd_snmp_set_t *) ci->ci_request_bufstart;
579 	    unsigned idx = 0;
580 	    snmp_set->snmp_set_items[idx].item_code = htole32(PDQSNMP_FULL_DUPLEX_ENABLE);
581 	    snmp_set->snmp_set_items[idx].item_value = htole32(pdq->pdq_flags & PDQ_WANT_FDX ? 1 : 2);
582 	    snmp_set->snmp_set_items[idx].item_port = 0;
583 	    idx++;
584 	    snmp_set->snmp_set_items[idx].item_code = htole32(PDQSNMP_EOL);
585 	    break;
586 	}
587 	default: {	/* to make gcc happy */
588 	    break;
589 	}
590     }
591 
592 
593     /*
594      * Sync the command request buffer and descriptor, then advance
595      * the request producer index.
596      */
597     PDQ_OS_CMDRQST_PRESYNC(pdq, cmdlen);
598     PDQ_OS_DESC_PRESYNC(pdq, txd, sizeof(pdq_txdesc_t));
599     PDQ_ADVANCE(ci->ci_request_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
600 
601     /*
602      * Sync the command response buffer and advance the response
603      * producer index (descriptor is already pre-initialized)
604      */
605     PDQ_OS_CMDRSP_PRESYNC(pdq, PDQ_SIZE_COMMAND_RESPONSE);
606     PDQ_ADVANCE(ci->ci_response_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
607     /*
608      * At this point the command is fully set up.  All that remains is to
609      * produce it to the PDQ by advancing the producer indexes.
610      */
611     PDQ_PRINTF(("PDQ Queue Command Request: %s queued\n",
612 		pdq_cmd_info[op].cmd_name));
613 
614     ci->ci_command_active++;
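    /*
     * The producer CSRs take the producer index in the low byte and the
     * completion index in the high byte.
     */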
615     PDQ_CSR_WRITE(csrs, csr_cmd_response_producer, ci->ci_response_producer | (ci->ci_response_completion << 8));
616     PDQ_CSR_WRITE(csrs, csr_cmd_request_producer, ci->ci_request_producer | (ci->ci_request_completion << 8));
617 }
618 
619 static void
620 pdq_process_command_responses(
621     pdq_t * const pdq)
622 {
623     const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
624     pdq_command_info_t * const ci = &pdq->pdq_command_info;
625     volatile const pdq_consumer_block_t * const cbp = pdq->pdq_cbp;
626     pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
627     const pdq_response_generic_t *rspgen;
628     pdq_cmd_code_t op;
629     pdq_response_code_t status __unused;
630 
631     /*
632      * We have to process the command and response in tandem so
633      * just wait for the response to be consumed.  If it has been
634      * consumed then the command must have been as well.
635      */
636 
637     if (le32toh(cbp->pdqcb_command_response) == ci->ci_response_completion)
638 	return;
639 
640     PDQ_ASSERT(le32toh(cbp->pdqcb_command_request) != ci->ci_request_completion);
641 
642     PDQ_OS_CMDRSP_POSTSYNC(pdq, PDQ_SIZE_COMMAND_RESPONSE);
643     rspgen = (const pdq_response_generic_t *) ci->ci_response_bufstart;
644     op = le32toh(rspgen->generic_op);
645     status = le32toh(rspgen->generic_status);
646     PDQ_ASSERT(op == ci->ci_queued_commands[ci->ci_request_completion]);
647     PDQ_ASSERT(status == PDQR_SUCCESS);
648     PDQ_PRINTF(("PDQ Process Command Response: %s completed (status=%d [0x%x])\n",
649 		pdq_cmd_info[op].cmd_name,
650 		status,
651 		status));
652 
653     if (op == PDQC_STATUS_CHARS_GET && (pdq->pdq_flags & PDQ_PRINTCHARS)) {
654 	pdq->pdq_flags &= ~PDQ_PRINTCHARS;
655 	pdq_print_fddi_chars(pdq, (const pdq_response_status_chars_get_t *) rspgen);
656     } else if (op == PDQC_DEC_EXT_MIB_GET) {
657 	pdq->pdq_flags &= ~PDQ_IS_FDX;
658 	if (le32toh(((const pdq_response_dec_ext_mib_get_t *)rspgen)->dec_ext_mib_get.fdx_operational))
659 	    pdq->pdq_flags |= PDQ_IS_FDX;
660     }
661 
662     PDQ_ADVANCE(ci->ci_request_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
663     PDQ_ADVANCE(ci->ci_response_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
664     ci->ci_command_active = 0;
665 
666     if (ci->ci_pending_commands != 0) {
667 	pdq_queue_commands(pdq);
668     } else {
669 	PDQ_CSR_WRITE(csrs, csr_cmd_response_producer,
670 		      ci->ci_response_producer | (ci->ci_response_completion << 8));
671 	PDQ_CSR_WRITE(csrs, csr_cmd_request_producer,
672 		      ci->ci_request_producer | (ci->ci_request_completion << 8));
673     }
674 }
675 
676 /*
677  * The following routine processes unsolicited events.
678  * It also fills the unsolicited queue with
679  * event buffers so it can be used to initialize the queue
680  * as well.
681  */
682 static void
683 pdq_process_unsolicited_events(
684     pdq_t *pdq)
685 {
686     const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
687     pdq_unsolicited_info_t *ui = &pdq->pdq_unsolicited_info;
688     volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
689     pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
690 
691     /*
692      * Process each unsolicited event (if any).
693      */
694 
695     while (le32toh(cbp->pdqcb_unsolicited_event) != ui->ui_completion) {
696 	const pdq_unsolicited_event_t *event;
697 	pdq_entity_t entity;
698 	uint32_t value;
699 	event = &ui->ui_events[ui->ui_completion & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
700 	PDQ_OS_UNSOL_EVENT_POSTSYNC(pdq, event);
701 
702 	switch (event->event_type) {
703 	    case PDQ_UNSOLICITED_EVENT: {
704 		int bad_event = 0;
705 		entity = le32toh(event->event_entity);
706 		value = le32toh(event->event_code.value);
707 		switch (entity) {
708 		    case PDQ_ENTITY_STATION: {
709 			bad_event = value >= PDQ_STATION_EVENT_MAX;
710 			break;
711 		    }
712 		    case PDQ_ENTITY_LINK: {
713 			bad_event = value >= PDQ_LINK_EVENT_MAX;
714 			break;
715 		    }
716 		    case PDQ_ENTITY_PHY_PORT: {
717 			bad_event = value >= PDQ_PHY_EVENT_MAX;
718 			break;
719 		    }
720 		    default: {
721 			bad_event = 1;
722 			break;
723 		    }
724 		}
725 		if (bad_event) {
726 		    break;
727 		}
728 		printf(PDQ_OS_PREFIX "Unsolicited Event: %s: %s",
729 		       PDQ_OS_PREFIX_ARGS,
730 		       pdq_entities[entity],
731 		       pdq_event_codes[entity][value]);
732 		if (event->event_entity == PDQ_ENTITY_PHY_PORT)
733 		    printf("[%d]", le32toh(event->event_index));
734 		printf("\n");
735 		break;
736 	    }
737 	    case PDQ_UNSOLICITED_COUNTERS: {
738 		break;
739 	    }
740 	}
741 	PDQ_OS_UNSOL_EVENT_PRESYNC(pdq, event);
742 	PDQ_ADVANCE(ui->ui_completion, 1, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
743 	ui->ui_free++;
744     }
745 
746     /*
747      * Now give the event buffers back to the PDQ.
748      */
749     PDQ_ADVANCE(ui->ui_producer, ui->ui_free, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
750     ui->ui_free = 0;
751 
752     PDQ_CSR_WRITE(csrs, csr_unsolicited_producer,
753 		  ui->ui_producer | (ui->ui_completion << 8));
754 }
755 
756 static void
757 pdq_process_received_data(
758     pdq_t *pdq,
759     pdq_rx_info_t *rx,
760     pdq_rxdesc_t *receives,
761     pdq_uint32_t completion_goal,
762     pdq_uint32_t ring_mask)
763 {
764     pdq_uint32_t completion = rx->rx_completion;
765     pdq_uint32_t producer = rx->rx_producer;
766     PDQ_OS_DATABUF_T **buffers = (PDQ_OS_DATABUF_T **) rx->rx_buffers;
767     pdq_rxdesc_t *rxd;
768     pdq_uint32_t idx;
769 
770     while (completion != completion_goal) {
771 	PDQ_OS_DATABUF_T *fpdu, *lpdu, *npdu;
772 	pdq_uint8_t *dataptr;
773 	pdq_uint32_t fc, datalen, pdulen, segcnt;
774 	pdq_uint32_t status;
775 
776 	fpdu = lpdu = buffers[completion];
777 	PDQ_ASSERT(fpdu != NULL);
778 	PDQ_OS_RXPDU_POSTSYNC(pdq, fpdu, 0, sizeof(u_int32_t));
779 	dataptr = PDQ_OS_DATABUF_PTR(fpdu);
780 	status = le32toh(*(pdq_uint32_t *) dataptr);
781 	if (PDQ_RXS_RCC_BADPDU(status) == 0) {
782 	    datalen = PDQ_RXS_LEN(status);
783 	    PDQ_OS_RXPDU_POSTSYNC(pdq, fpdu, sizeof(u_int32_t),
784 				  PDQ_RX_FC_OFFSET + 1 - sizeof(u_int32_t));
785 	    fc = dataptr[PDQ_RX_FC_OFFSET];
786 	    switch (fc & (PDQ_FDDIFC_C|PDQ_FDDIFC_L|PDQ_FDDIFC_F)) {
787 		case PDQ_FDDI_LLC_ASYNC:
788 		case PDQ_FDDI_LLC_SYNC:
789 		case PDQ_FDDI_IMP_ASYNC:
790 		case PDQ_FDDI_IMP_SYNC: {
791 		    if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_LLC_MIN) {
792 			PDQ_PRINTF(("discard: bad length %d\n", datalen));
793 			goto discard_frame;
794 		    }
795 		    break;
796 		}
797 		case PDQ_FDDI_SMT: {
798 		    if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_SMT_MIN)
799 			goto discard_frame;
800 		    break;
801 		}
802 		default: {
803 		    PDQ_PRINTF(("discard: bad fc 0x%x\n", fc));
804 		    goto discard_frame;
805 		}
806 	    }
807 	    /*
808 	     * Update the lengths of the data buffers now that we know
809 	     * the real length.
810 	     */
811 	    pdulen = datalen + (PDQ_RX_FC_OFFSET - PDQ_OS_HDR_OFFSET) - 4 /* CRC */;
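	    /* Number of receive segments spanned by this PDU (ceiling division). */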
812 	    segcnt = (pdulen + PDQ_OS_HDR_OFFSET + PDQ_OS_DATABUF_SIZE - 1) / PDQ_OS_DATABUF_SIZE;
813 	    PDQ_OS_DATABUF_ALLOC(pdq, npdu);
814 	    if (npdu == NULL) {
815 		PDQ_PRINTF(("discard: no databuf #0\n"));
816 		goto discard_frame;
817 	    }
818 	    buffers[completion] = npdu;
819 	    for (idx = 1; idx < segcnt; idx++) {
820 		PDQ_OS_DATABUF_ALLOC(pdq, npdu);
821 		if (npdu == NULL) {
822 		    PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
823 		    PDQ_OS_DATABUF_FREE(pdq, fpdu);
824 		    goto discard_frame;
825 		}
826 		PDQ_OS_DATABUF_NEXT_SET(lpdu, buffers[(completion + idx) & ring_mask]);
827 		lpdu = PDQ_OS_DATABUF_NEXT(lpdu);
828 		buffers[(completion + idx) & ring_mask] = npdu;
829 	    }
830 	    PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
831 	    for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
832 		buffers[(producer + idx) & ring_mask] =
833 		    buffers[(completion + idx) & ring_mask];
834 		buffers[(completion + idx) & ring_mask] = NULL;
835 	    }
836 	    PDQ_OS_DATABUF_ADJ(fpdu, PDQ_OS_HDR_OFFSET);
837 	    if (segcnt == 1) {
838 		PDQ_OS_DATABUF_LEN_SET(fpdu, pdulen);
839 	    } else {
840 		PDQ_OS_DATABUF_LEN_SET(lpdu, pdulen + PDQ_OS_HDR_OFFSET - (segcnt - 1) * PDQ_OS_DATABUF_SIZE);
841 	    }
842 	    /*
843 	     * Do not pass to protocol if packet was received promiscuously
844 	     */
845 	    pdq_os_receive_pdu(pdq, fpdu, pdulen,
846 			       PDQ_RXS_RCC_DD(status) < PDQ_RXS_RCC_DD_CAM_MATCH);
847 	    rx->rx_free += PDQ_RX_SEGCNT;
848 	    PDQ_ADVANCE(producer, PDQ_RX_SEGCNT, ring_mask);
849 	    PDQ_ADVANCE(completion, PDQ_RX_SEGCNT, ring_mask);
850 	    continue;
851 	} else {
852 	    PDQ_PRINTF(("discard: bad pdu 0x%x(%d.%d.%d.%d.%d)\n", status.rxs_status,
853 			PDQ_RXS_RCC_BADPDU(status), PDQ_RXS_RCC_BADCRC(status),
854 			PDQ_RXS_RCC_REASON(status), PDQ_RXS_FSC(status),
855 			PDQ_RXS_FSB_E(status)));
856 	    if (PDQ_RXS_RCC_REASON(status) == 7)
857 		goto discard_frame;
858 	    if (PDQ_RXS_RCC_REASON(status) != 0) {
859 		/* hardware fault */
860 		if (PDQ_RXS_RCC_BADCRC(status)) {
861 		    printf(PDQ_OS_PREFIX " MAC CRC error (source=%x-%x-%x-%x-%x-%x)\n",
862 			   PDQ_OS_PREFIX_ARGS,
863 			   dataptr[PDQ_RX_FC_OFFSET+1],
864 			   dataptr[PDQ_RX_FC_OFFSET+2],
865 			   dataptr[PDQ_RX_FC_OFFSET+3],
866 			   dataptr[PDQ_RX_FC_OFFSET+4],
867 			   dataptr[PDQ_RX_FC_OFFSET+5],
868 			   dataptr[PDQ_RX_FC_OFFSET+6]);
869 		    /* rx->rx_badcrc++; */
870 		} else if (PDQ_RXS_FSC(status) == 0 || PDQ_RXS_FSB_E(status) == 1) {
871 		    /* rx->rx_frame_status_errors++; */
872 		} else {
873 		    /* hardware fault */
874 		}
875 	    }
876 	}
877       discard_frame:
878 	/*
879 	 * Discarded frames go right back on the ring, so their buffers
880 	 * and ring entries are immediately reused.
881 	 */
882 	for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
883 	    buffers[producer] = buffers[completion];
884 	    buffers[completion] = NULL;
885 	    rxd = &receives[rx->rx_producer];
886 	    if (idx == 0) {
887 		rxd->rxd_pa_hi = htole32(
888 		    PDQ_RXDESC_SOP |
889 		    PDQ_RXDESC_SEG_CNT(PDQ_RX_SEGCNT - 1) |
890 		    PDQ_RXDESC_SEG_LEN(PDQ_OS_DATABUF_SIZE));
891 	    } else {
892 		rxd->rxd_pa_hi =
893 		    htole32(PDQ_RXDESC_SEG_LEN(PDQ_OS_DATABUF_SIZE));
894 	    }
895 	    rxd->rxd_pa_lo = htole32(PDQ_OS_DATABUF_BUSPA(pdq, buffers[rx->rx_producer]));
896 	    PDQ_OS_RXPDU_PRESYNC(pdq, buffers[rx->rx_producer], 0, PDQ_OS_DATABUF_SIZE);
897 	    PDQ_OS_DESC_PRESYNC(pdq, rxd, sizeof(*rxd));
898 	    PDQ_ADVANCE(rx->rx_producer, 1, ring_mask);
899 	    PDQ_ADVANCE(producer, 1, ring_mask);
900 	    PDQ_ADVANCE(completion, 1, ring_mask);
901 	}
902     }
903     rx->rx_completion = completion;
904 
905     while (rx->rx_free > PDQ_RX_SEGCNT && rx->rx_free > rx->rx_target) {
906 	PDQ_OS_DATABUF_T *pdu;
907 	/*
908 	 * Allocate the needed number of data buffers.
909 	 * Try to obtain them from our free queue before
910 	 * asking the system for more.
911 	 */
912 	for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
913 	    if ((pdu = buffers[(rx->rx_producer + idx) & ring_mask]) == NULL) {
914 		PDQ_OS_DATABUF_ALLOC(pdq, pdu);
915 		if (pdu == NULL)
916 		    break;
917 		buffers[(rx->rx_producer + idx) & ring_mask] = pdu;
918 	    }
919 	    rxd = &receives[(rx->rx_producer + idx) & ring_mask];
920 	    if (idx == 0) {
921 		rxd->rxd_pa_hi = htole32(
922 		    PDQ_RXDESC_SOP|
923 		    PDQ_RXDESC_SEG_CNT(PDQ_RX_SEGCNT - 1)|
924 		    PDQ_RXDESC_SEG_LEN(PDQ_OS_DATABUF_SIZE));
925 	    } else {
926 		rxd->rxd_pa_hi =
927 		    htole32(PDQ_RXDESC_SEG_LEN(PDQ_OS_DATABUF_SIZE));
928 	    }
929 	    rxd->rxd_pa_lo = htole32(PDQ_OS_DATABUF_BUSPA(pdq, pdu));
930 	    PDQ_OS_RXPDU_PRESYNC(pdq, pdu, 0, PDQ_OS_DATABUF_SIZE);
931 	    PDQ_OS_DESC_PRESYNC(pdq, rxd, sizeof(*rxd));
932 	}
933 	if (idx < PDQ_RX_SEGCNT) {
934 	    /*
935 	     * We didn't get all databufs required to complete a new
936 	     * receive buffer.  Keep the ones we got and retry a bit
937 	     * later for the rest.
938 	     */
939 	    break;
940 	}
941 	PDQ_ADVANCE(rx->rx_producer, PDQ_RX_SEGCNT, ring_mask);
942 	rx->rx_free -= PDQ_RX_SEGCNT;
943     }
944 }
945 
946 static void pdq_process_transmitted_data(pdq_t *pdq);
947 
948 pdq_boolean_t
949 pdq_queue_transmit_data(
950     pdq_t *pdq,
951     PDQ_OS_DATABUF_T *pdu)
952 {
953     pdq_tx_info_t * const tx = &pdq->pdq_tx_info;
954     pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
955     pdq_uint32_t producer = tx->tx_producer;
956     pdq_txdesc_t *eop = NULL;
957     PDQ_OS_DATABUF_T *pdu0;
958     pdq_uint32_t freecnt;
959 #if defined(PDQ_BUS_DMA)
960     bus_dmamap_t map;
961 #endif
962 
963   again:
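    /*
     * When the receive FC offset equals the OS header offset, a pre-built
     * header descriptor (tx_hdrdesc) is prepended to each transmit, so one
     * descriptor fewer is available for data segments.
     */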
964     if (PDQ_RX_FC_OFFSET == PDQ_OS_HDR_OFFSET) {
965 	freecnt = tx->tx_free - 1;
966     } else {
967 	freecnt = tx->tx_free;
968     }
969     /*
970      * Need 2 or more descriptors to be able to send.
971      */
972     if (freecnt == 0) {
973 	pdq->pdq_intrmask |= PDQ_HOST_INT_TX_ENABLE;
974 	PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
975 	return PDQ_FALSE;
976     }
977 
978     if (PDQ_RX_FC_OFFSET == PDQ_OS_HDR_OFFSET) {
979 	dbp->pdqdb_transmits[producer] = tx->tx_hdrdesc;
980 	PDQ_OS_DESC_PRESYNC(pdq, &dbp->pdqdb_transmits[producer], sizeof(pdq_txdesc_t));
981 	PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
982     }
983 
984 #if defined(PDQ_BUS_DMA)
985     map = M_GETCTX(pdu, bus_dmamap_t);
986     if (freecnt >= map->dm_nsegs) {
987 	int idx;
988 	for (idx = 0; idx < map->dm_nsegs; idx++) {
989 	    /*
990 	     * Initialize the transmit descriptor
991 	     */
992 	    eop = &dbp->pdqdb_transmits[producer];
993 	    eop->txd_pa_hi =
994 		htole32(PDQ_TXDESC_SEG_LEN(map->dm_segs[idx].ds_len));
995 	    eop->txd_pa_lo = htole32(map->dm_segs[idx].ds_addr);
996 	    PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
997 	    freecnt--;
998 	    PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
999 	}
1000 	pdu0 = NULL;
1001     } else {
1002 	pdu0 = pdu;
1003     }
1004 #else
1005     for (freecnt = tx->tx_free - 1, pdu0 = pdu; pdu0 != NULL && freecnt > 0;) {
1006 	pdq_uint32_t fraglen, datalen = PDQ_OS_DATABUF_LEN(pdu0);
1007 	const pdq_uint8_t *dataptr = PDQ_OS_DATABUF_PTR(pdu0);
1008 
1009 	/*
1010 	 * The first segment is limited to the space remaining in
1011 	 * the page.  All segments after that can be up to a full page
1012 	 * in size.
1013 	 */
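	/*
	 * (dataptr - (pdq_uint8_t *) NULL) is just a way of obtaining the
	 * buffer's address as an integer for the page-offset calculation.
	 */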
1014 	fraglen = PDQ_OS_PAGESIZE - ((dataptr - (pdq_uint8_t *) NULL) & (PDQ_OS_PAGESIZE-1));
1015 	while (datalen > 0 && freecnt > 0) {
1016 	    pdq_uint32_t seglen = (fraglen < datalen ? fraglen : datalen);
1017 
1018 	    /*
1019 	     * Initialize the transmit descriptor
1020 	     */
1021 	    eop = &dbp->pdqdb_transmits[producer];
1022 	    eop->txd_pa_hi = htole32(PDQ_TXDESC_SEG_LEN(seglen));
1023 	    eop->txd_pa_lo = htole32(PDQ_OS_VA_TO_BUSPA(pdq, dataptr));
1024 	    PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
1025 	    datalen -= seglen;
1026 	    dataptr += seglen;
1027 	    fraglen = PDQ_OS_PAGESIZE;
1028 	    freecnt--;
1029 	    PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
1030 	}
1031 	pdu0 = PDQ_OS_DATABUF_NEXT(pdu0);
1032     }
1033 #endif /* defined(PDQ_BUS_DMA) */
1034     if (pdu0 != NULL) {
1035 	unsigned completion = tx->tx_completion;
1036 	PDQ_ASSERT(freecnt == 0);
1037 	PDQ_OS_CONSUMER_POSTSYNC(pdq);
1038 	pdq_process_transmitted_data(pdq);
1039 	if (completion != tx->tx_completion) {
1040 	    producer = tx->tx_producer;
1041 	    eop = NULL;
1042 	    goto again;
1043 	}
1044 	/*
1045 	 * If we still have data to process then the ring was too full
1046 	 * to store the PDU.  Return FALSE so the caller will requeue
1047 	 * the PDU for later.
1048 	 */
1049 	pdq->pdq_intrmask |= PDQ_HOST_INT_TX_ENABLE;
1050 	PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
1051 	return PDQ_FALSE;
1052     }
1053     /*
1054      * Everything went fine.  Finish it up.
1055      */
1056     tx->tx_descriptor_count[tx->tx_producer] = tx->tx_free - freecnt;
1057     if (PDQ_RX_FC_OFFSET != PDQ_OS_HDR_OFFSET) {
1058 	dbp->pdqdb_transmits[tx->tx_producer].txd_pa_hi |=
1059 	    htole32(PDQ_TXDESC_SOP);
1060 	PDQ_OS_DESC_PRESYNC(pdq, &dbp->pdqdb_transmits[tx->tx_producer],
1061 	    sizeof(pdq_txdesc_t));
1062     }
1063     eop->txd_pa_hi |= htole32(PDQ_TXDESC_EOP);
1064     PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
1065     PDQ_OS_DATABUF_ENQUEUE(&tx->tx_txq, pdu);
1066     tx->tx_producer = producer;
1067     tx->tx_free = freecnt;
1068     PDQ_DO_TYPE2_PRODUCER(pdq);
1069     return PDQ_TRUE;
1070 }
1071 
1072 static void
1073 pdq_process_transmitted_data(
1074     pdq_t *pdq)
1075 {
1076     pdq_tx_info_t *tx = &pdq->pdq_tx_info;
1077     volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
1078     pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
1079     pdq_uint32_t completion = tx->tx_completion;
1080     int reclaimed = 0;
1081 
1082     while (completion != le16toh(cbp->pdqcb_transmits)) {
1083 	PDQ_OS_DATABUF_T *pdu;
1084 	pdq_uint32_t descriptor_count = tx->tx_descriptor_count[completion];
1085 	PDQ_ASSERT(dbp->pdqdb_transmits[completion].txd_sop == 1);
1086 	PDQ_ASSERT(dbp->pdqdb_transmits[(completion + descriptor_count - 1) & PDQ_RING_MASK(dbp->pdqdb_transmits)].txd_eop == 1);
1087 	PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
1088 	pdq_os_transmit_done(pdq, pdu);
1089 	tx->tx_free += descriptor_count;
1090 	reclaimed = 1;
1091 	PDQ_ADVANCE(completion, descriptor_count, PDQ_RING_MASK(dbp->pdqdb_transmits));
1092     }
1093     if (tx->tx_completion != completion) {
1094 	tx->tx_completion = completion;
1095 	pdq->pdq_intrmask &= ~PDQ_HOST_INT_TX_ENABLE;
1096 	PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
1097 	pdq_os_restart_transmitter(pdq);
1098     }
1099     if (reclaimed)
1100 	PDQ_DO_TYPE2_PRODUCER(pdq);
1101 }
1102 
1103 void
1104 pdq_flush_transmitter(
1105     pdq_t *pdq)
1106 {
1107     volatile pdq_consumer_block_t *cbp = pdq->pdq_cbp;
1108     pdq_tx_info_t *tx = &pdq->pdq_tx_info;
1109 
1110     for (;;) {
1111 	PDQ_OS_DATABUF_T *pdu;
1112 	PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
1113 	if (pdu == NULL)
1114 	    break;
1115 	/*
1116 	 * Don't call transmit done since the packet never made it
1117 	 * out on the wire.
1118 	 */
1119 	PDQ_OS_DATABUF_FREE(pdq, pdu);
1120     }
1121 
1122     tx->tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
1123     tx->tx_completion = tx->tx_producer;
1124     cbp->pdqcb_transmits = htole16(tx->tx_completion);
1125     PDQ_OS_CONSUMER_PRESYNC(pdq);
1126 
1127     PDQ_DO_TYPE2_PRODUCER(pdq);
1128 }
1129 
1130 void
1131 pdq_hwreset(
1132     pdq_t *pdq)
1133 {
1134     const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1135     pdq_state_t state;
1136     int cnt;
1137 
1138     state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1139     if (state == PDQS_DMA_UNAVAILABLE)
1140 	return;
1141     PDQ_CSR_WRITE(csrs, csr_port_data_a,
1142 		  (state == PDQS_HALTED && pdq->pdq_type != PDQ_DEFTA) ? 0 : PDQ_PRESET_SKIP_SELFTEST);
1143     PDQ_CSR_WRITE(csrs, csr_port_reset, 1);
1144     PDQ_OS_USEC_DELAY(100);
1145     PDQ_CSR_WRITE(csrs, csr_port_reset, 0);
1146     for (cnt = 100000;;cnt--) {
1147 	PDQ_OS_USEC_DELAY(1000);
1148 	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1149 	if (state == PDQS_DMA_UNAVAILABLE || cnt == 0)
1150 	    break;
1151     }
1152     PDQ_PRINTF(("PDQ Reset spun %d cycles\n", 100000 - cnt));
1153     PDQ_OS_USEC_DELAY(10000);
1154     state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1155     PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1156     PDQ_ASSERT(cnt > 0);
1157 }
1158 
1159 /*
1160  * The following routine brings the PDQ from whatever state it is
1161  * in to DMA_UNAVAILABLE (i.e. like a RESET but without doing a RESET).
1162  */
1163 pdq_state_t
1164 pdq_stop(
1165     pdq_t *pdq)
1166 {
1167     pdq_state_t state;
1168     const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1169     int cnt, pass = 0, idx;
1170     PDQ_OS_DATABUF_T **buffers;
1171 
1172   restart:
1173     state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1174     if (state != PDQS_DMA_UNAVAILABLE) {
1175 	pdq_hwreset(pdq);
1176 	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1177 	PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1178     }
1179 #if 0
1180     switch (state) {
1181 	case PDQS_RING_MEMBER:
1182 	case PDQS_LINK_UNAVAILABLE:
1183 	case PDQS_LINK_AVAILABLE: {
1184 	    PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_LINK_UNINIT);
1185 	    PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1186 	    pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
1187 	    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1188 	    PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1189 	    /* FALL THROUGH */
1190 	}
1191 	case PDQS_DMA_AVAILABLE: {
1192 	    PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
1193 	    PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1194 	    pdq_do_port_control(csrs, PDQ_PCTL_DMA_UNINIT);
1195 	    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1196 	    PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1197 	    /* FALL THROUGH */
1198 	}
1199 	case PDQS_DMA_UNAVAILABLE: {
1200 	    break;
1201 	}
1202     }
1203 #endif
1204     /*
1205      * Now we should be in DMA_UNAVAILABLE.  So bring the PDQ into
1206      * DMA_AVAILABLE.
1207      */
1208 
1209     /*
1210      * Obtain the hardware address and firmware revisions
1211      * (MLA = my long address which is FDDI speak for hardware address)
1212      */
1213     pdq_read_mla(&pdq->pdq_csrs, &pdq->pdq_hwaddr);
1214     pdq_read_fwrev(&pdq->pdq_csrs, &pdq->pdq_fwrev);
1215     pdq->pdq_chip_rev = pdq_read_chiprev(&pdq->pdq_csrs);
1216 
1217     if (pdq->pdq_type == PDQ_DEFPA) {
1218 	/*
1219 	 * Disable interrupts and DMA.
1220 	 */
1221 	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control, 0);
1222 	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x10);
1223     }
1224 
1225     /*
1226      * Flush all the databuf queues.
1227      */
1228     pdq_flush_databuf_queue(pdq, &pdq->pdq_tx_info.tx_txq);
1229     pdq->pdq_flags &= ~(PDQ_TXOK|PDQ_IS_ONRING|PDQ_IS_FDX);
1230     buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_rx_info.rx_buffers;
1231     for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_receives); idx++) {
1232 	if (buffers[idx] != NULL) {
1233 	    PDQ_OS_DATABUF_FREE(pdq, buffers[idx]);
1234 	    buffers[idx] = NULL;
1235 	}
1236     }
1237     pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives);
1238     buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_host_smt_info.rx_buffers;
1239     for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_host_smt); idx++) {
1240 	if (buffers[idx] != NULL) {
1241 	    PDQ_OS_DATABUF_FREE(pdq, buffers[idx]);
1242 	    buffers[idx] = NULL;
1243 	}
1244     }
1245     pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt);
1246 
1247     /*
1248      * Reset the consumer indexes to 0.
1249      */
1250     pdq->pdq_cbp->pdqcb_receives = 0;
1251     pdq->pdq_cbp->pdqcb_transmits = 0;
1252     pdq->pdq_cbp->pdqcb_host_smt = 0;
1253     pdq->pdq_cbp->pdqcb_unsolicited_event = 0;
1254     pdq->pdq_cbp->pdqcb_command_response = 0;
1255     pdq->pdq_cbp->pdqcb_command_request = 0;
1256     PDQ_OS_CONSUMER_PRESYNC(pdq);
1257 
1258     /*
1259      * Reset the producer and completion indexes to 0.
1260      */
1261     pdq->pdq_command_info.ci_request_producer = 0;
1262     pdq->pdq_command_info.ci_response_producer = 0;
1263     pdq->pdq_command_info.ci_request_completion = 0;
1264     pdq->pdq_command_info.ci_response_completion = 0;
1265     pdq->pdq_unsolicited_info.ui_producer = 0;
1266     pdq->pdq_unsolicited_info.ui_completion = 0;
1267     pdq->pdq_rx_info.rx_producer = 0;
1268     pdq->pdq_rx_info.rx_completion = 0;
1269     pdq->pdq_tx_info.tx_producer = 0;
1270     pdq->pdq_tx_info.tx_completion = 0;
1271     pdq->pdq_host_smt_info.rx_producer = 0;
1272     pdq->pdq_host_smt_info.rx_completion = 0;
1273 
1274     pdq->pdq_command_info.ci_command_active = 0;
1275     pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
1276     pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
1277 
1278     /*
1279      * Allow the DEFPA to do DMA.  Then program the physical
1280      * addresses of the consumer and descriptor blocks.
1281      */
1282     if (pdq->pdq_type == PDQ_DEFPA) {
1283 #ifdef PDQTEST
1284 	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
1285 		      PDQ_PFI_MODE_DMA_ENABLE);
1286 #else
1287 	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
1288 		      PDQ_PFI_MODE_DMA_ENABLE
1289 	    /*|PDQ_PFI_MODE_PFI_PCI_INTR*/|PDQ_PFI_MODE_PDQ_PCI_INTR);
1290 #endif
1291     }
1292 
1293     /*
1294      * Make sure the unsolicited queue has events ...
1295      */
1296     pdq_process_unsolicited_events(pdq);
1297 
1298     if ((pdq->pdq_type == PDQ_DEFEA && pdq->pdq_chip_rev == PDQ_CHIP_REV_E)
1299 	    || pdq->pdq_type == PDQ_DEFTA)
1300 	PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_16LW);
1301     else
1302 	PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_8LW);
1303     PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_DMA_BURST_SIZE_SET);
1304     pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
1305 
1306     /*
1307      * Make sure there isn't stale information in the caches before
1308      * telling the adapter about the blocks it's going to use.
1309      */
1310     PDQ_OS_CONSUMER_PRESYNC(pdq);
1311 
1312     PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1313     PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_consumer_block);
1314     pdq_do_port_control(csrs, PDQ_PCTL_CONSUMER_BLOCK);
1315 
1316     PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1317     PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_descriptor_block | PDQ_DMA_INIT_LW_BSWAP_DATA);
1318     pdq_do_port_control(csrs, PDQ_PCTL_DMA_INIT);
1319 
1320     for (cnt = 0; cnt < 1000; cnt++) {
1321 	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1322 	if (state == PDQS_HALTED) {
1323 	    if (pass > 0)
1324 		return PDQS_HALTED;
1325 	    pass = 1;
1326 	    goto restart;
1327 	}
1328 	if (state == PDQS_DMA_AVAILABLE) {
1329 	    PDQ_PRINTF(("Transition to DMA Available took %d spins\n", cnt));
1330 	    break;
1331 	}
1332 	PDQ_OS_USEC_DELAY(1000);
1333     }
1334     PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1335 
1336     PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
1337     pdq->pdq_intrmask = 0;
1338       /* PDQ_HOST_INT_STATE_CHANGE
1339 	|PDQ_HOST_INT_FATAL_ERROR|PDQ_HOST_INT_CMD_RSP_ENABLE
1340 	|PDQ_HOST_INT_UNSOL_ENABLE */;
1341     PDQ_CSR_WRITE(csrs, csr_host_int_enable, pdq->pdq_intrmask);
1342 
1343     /*
1344      * Any other command but START should be valid.
1345      */
1346     pdq->pdq_command_info.ci_pending_commands &= ~(PDQ_BITMASK(PDQC_START));
1347     if (pdq->pdq_flags & PDQ_PRINTCHARS)
1348 	pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1349     pdq_queue_commands(pdq);
1350 
1351     if (pdq->pdq_flags & PDQ_PRINTCHARS) {
1352 	/*
1353 	 * Now wait (up to 100ms) for the command(s) to finish.
1354 	 */
1355 	for (cnt = 0; cnt < 1000; cnt++) {
1356 	    PDQ_OS_CONSUMER_POSTSYNC(pdq);
1357 	    pdq_process_command_responses(pdq);
1358 	    if (pdq->pdq_command_info.ci_response_producer == pdq->pdq_command_info.ci_response_completion)
1359 		break;
1360 	    PDQ_OS_USEC_DELAY(1000);
1361 	}
1362 	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1363     }
1364 
1365     return state;
1366 }
1367 
1368 void
1369 pdq_run(
1370     pdq_t *pdq)
1371 {
1372     const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1373     pdq_state_t state;
1374 
1375     state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1376     PDQ_ASSERT(state != PDQS_DMA_UNAVAILABLE);
1377     PDQ_ASSERT(state != PDQS_RESET);
1378     PDQ_ASSERT(state != PDQS_HALTED);
1379     PDQ_ASSERT(state != PDQS_UPGRADE);
1380     PDQ_ASSERT(state != PDQS_RING_MEMBER);
1381     switch (state) {
1382 	case PDQS_DMA_AVAILABLE: {
1383 	    /*
1384 	     * The PDQ after being reset screws up some of its state.
1385 	     * So we need to clear all the errors/interrupts so the real
1386 	     * ones will get through.
1387 	     */
1388 	    PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
1389 	    pdq->pdq_intrmask = PDQ_HOST_INT_STATE_CHANGE
1390 		|PDQ_HOST_INT_XMT_DATA_FLUSH|PDQ_HOST_INT_FATAL_ERROR
1391 		|PDQ_HOST_INT_CMD_RSP_ENABLE|PDQ_HOST_INT_UNSOL_ENABLE
1392 		|PDQ_HOST_INT_RX_ENABLE|PDQ_HOST_INT_HOST_SMT_ENABLE;
1393 	    PDQ_CSR_WRITE(csrs, csr_host_int_enable, pdq->pdq_intrmask);
1394 	    /*
1395 	     * Set the MAC and address filters and start up the PDQ.
1396 	     */
1397 	    pdq_process_unsolicited_events(pdq);
1398 	    pdq_process_received_data(pdq, &pdq->pdq_rx_info,
1399 				      pdq->pdq_dbp->pdqdb_receives,
1400 				      le16toh(pdq->pdq_cbp->pdqcb_receives),
1401 				      PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
1402 	    PDQ_DO_TYPE2_PRODUCER(pdq);
1403 	    if (pdq->pdq_flags & PDQ_PASS_SMT) {
1404 		pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1405 					  pdq->pdq_dbp->pdqdb_host_smt,
1406 					  le32toh(pdq->pdq_cbp->pdqcb_host_smt),
1407 					  PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1408 		PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
1409 			      pdq->pdq_host_smt_info.rx_producer
1410 			          | (pdq->pdq_host_smt_info.rx_completion << 8));
1411 	    }
1412 	    pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
1413 		| PDQ_BITMASK(PDQC_ADDR_FILTER_SET)
1414 		| PDQ_BITMASK(PDQC_SNMP_SET)
1415 		| PDQ_BITMASK(PDQC_START);
1416 	    if (pdq->pdq_flags & PDQ_PRINTCHARS)
1417 		pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1418 	    pdq_queue_commands(pdq);
1419 	    break;
1420 	}
1421 	case PDQS_LINK_UNAVAILABLE:
1422 	case PDQS_LINK_AVAILABLE: {
1423 	    pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
1424 		| PDQ_BITMASK(PDQC_ADDR_FILTER_SET)
1425 		| PDQ_BITMASK(PDQC_SNMP_SET);
1426 	    if (pdq->pdq_flags & PDQ_PRINTCHARS)
1427 		pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1428 	    if (pdq->pdq_flags & PDQ_PASS_SMT) {
1429 		pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1430 					  pdq->pdq_dbp->pdqdb_host_smt,
1431 					  le32toh(pdq->pdq_cbp->pdqcb_host_smt),
1432 					  PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1433 		PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
1434 			      pdq->pdq_host_smt_info.rx_producer
1435 			          | (pdq->pdq_host_smt_info.rx_completion << 8));
1436 	    }
1437 	    pdq_process_unsolicited_events(pdq);
1438 	    pdq_queue_commands(pdq);
1439 	    break;
1440 	}
1441 	case PDQS_RING_MEMBER: {
1442 	}
1443 	default: {	/* to make gcc happy */
1444 	    break;
1445 	}
1446     }
1447 }
1448 
1449 int
1450 pdq_interrupt(
1451     pdq_t *pdq)
1452 {
1453     const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1454     pdq_uint32_t data;
1455     int progress = 0;
1456 
1457     if (pdq->pdq_type == PDQ_DEFPA)
1458 	PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
1459 
1460     while ((data = PDQ_CSR_READ(csrs, csr_port_status)) & PDQ_PSTS_INTR_PENDING) {
1461 	progress = 1;
1462 	PDQ_PRINTF(("PDQ Interrupt: Status = 0x%08x\n", data));
1463 	PDQ_OS_CONSUMER_POSTSYNC(pdq);
1464 	if (data & PDQ_PSTS_RCV_DATA_PENDING) {
1465 	    pdq_process_received_data(pdq, &pdq->pdq_rx_info,
1466 				      pdq->pdq_dbp->pdqdb_receives,
1467 				      le16toh(pdq->pdq_cbp->pdqcb_receives),
1468 				      PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
1469 	    PDQ_DO_TYPE2_PRODUCER(pdq);
1470 	}
	if (data & PDQ_PSTS_HOST_SMT_PENDING) {
	    pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
				      pdq->pdq_dbp->pdqdb_host_smt,
				      le32toh(pdq->pdq_cbp->pdqcb_host_smt),
				      PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
	    PDQ_DO_HOST_SMT_PRODUCER(pdq);
	}
	/* if (data & PDQ_PSTS_XMT_DATA_PENDING) */
	    pdq_process_transmitted_data(pdq);
	if (data & PDQ_PSTS_UNSOL_PENDING)
	    pdq_process_unsolicited_events(pdq);
	if (data & PDQ_PSTS_CMD_RSP_PENDING)
	    pdq_process_command_responses(pdq);
	if (data & PDQ_PSTS_TYPE_0_PENDING) {
	    data = PDQ_CSR_READ(csrs, csr_host_int_type_0);
	    if (data & PDQ_HOST_INT_STATE_CHANGE) {
		pdq_state_t state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
		printf(PDQ_OS_PREFIX "%s", PDQ_OS_PREFIX_ARGS, pdq_adapter_states[state]);
		if (state == PDQS_LINK_UNAVAILABLE) {
		    pdq->pdq_flags &= ~(PDQ_TXOK|PDQ_IS_ONRING|PDQ_IS_FDX);
		} else if (state == PDQS_LINK_AVAILABLE) {
		    if (pdq->pdq_flags & PDQ_WANT_FDX) {
			pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_DEC_EXT_MIB_GET);
			pdq_queue_commands(pdq);
		    }
		    pdq->pdq_flags |= PDQ_TXOK|PDQ_IS_ONRING;
		    pdq_os_restart_transmitter(pdq);
		} else if (state == PDQS_HALTED) {
		    pdq_response_error_log_get_t log_entry;
		    pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(csrs, csr_port_status));
		    printf(": halt code = %d (%s)\n",
			   halt_code, pdq_halt_codes[halt_code]);
		    if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA) {
			PDQ_PRINTF(("\tPFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
			       PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
			       data & PDQ_HOST_INT_FATAL_ERROR));
		    }
		    PDQ_OS_MEMZERO(&log_entry, sizeof(log_entry));
		    if (pdq_read_error_log(pdq, &log_entry)) {
			PDQ_PRINTF(("  Error log Entry:\n"));
			PDQ_PRINTF(("    CMD Status           = %d (0x%x)\n",
				    log_entry.error_log_get_status,
				    log_entry.error_log_get_status));
			PDQ_PRINTF(("    Event Status         = %d (0x%x)\n",
				    log_entry.error_log_get_event_status,
				    log_entry.error_log_get_event_status));
			PDQ_PRINTF(("    Caller Id            = %d (0x%x)\n",
				    log_entry.error_log_get_caller_id,
				    log_entry.error_log_get_caller_id));
			PDQ_PRINTF(("    Write Count          = %d (0x%x)\n",
				    log_entry.error_log_get_write_count,
				    log_entry.error_log_get_write_count));
			PDQ_PRINTF(("    FRU Implication Mask = %d (0x%x)\n",
				    log_entry.error_log_get_fru_implication_mask,
				    log_entry.error_log_get_fru_implication_mask));
			PDQ_PRINTF(("    Test ID              = %d (0x%x)\n",
				    log_entry.error_log_get_test_id,
				    log_entry.error_log_get_test_id));
		    }
		    pdq_stop(pdq);
		    if (pdq->pdq_flags & PDQ_RUNNING)
			pdq_run(pdq);
		    return 1;
		}
		printf("\n");
		PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_STATE_CHANGE);
	    }
	    if (data & PDQ_HOST_INT_FATAL_ERROR) {
		pdq_stop(pdq);
		if (pdq->pdq_flags & PDQ_RUNNING)
		    pdq_run(pdq);
		return 1;
	    }
	    if (data & PDQ_HOST_INT_XMT_DATA_FLUSH) {
		printf(PDQ_OS_PREFIX "Flushing transmit queue\n", PDQ_OS_PREFIX_ARGS);
		pdq->pdq_flags &= ~PDQ_TXOK;
		pdq_flush_transmitter(pdq);
		pdq_do_port_control(csrs, PDQ_PCTL_XMT_DATA_FLUSH_DONE);
		PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_XMT_DATA_FLUSH);
	    }
	}
	if (pdq->pdq_type == PDQ_DEFPA)
	    PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
    }
    return progress;
}

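/*
 * One-time initialization of a PDQ instance: allocate the pdq_t and
 * the 8KB-aligned descriptor block, map the CSRs, stop the adapter
 * into a known state, and set up the command, unsolicited event,
 * receive, and transmit rings.  Returns NULL on allocation failure or
 * if the adapter cannot be brought to a usable (DMA available) state.
 *
 * A minimal attach-time sketch (the softc and field names here are
 * illustrative only, not part of this module):
 *
 *	sc->sc_pdq = pdq_initialize(bus, csr_base, name, unit,
 *	    sc, PDQ_DEFPA);
 *	if (sc->sc_pdq == NULL)
 *		return;
 *	(then install an interrupt handler that calls pdq_interrupt())
 */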
pdq_t *
pdq_initialize(
    pdq_bus_t bus,
    pdq_bus_memaddr_t csr_base,
    const char *name,
    int unit,
    void *ctx,
    pdq_type_t type)
{
    pdq_t *pdq;
    pdq_state_t state;
    pdq_descriptor_block_t *dbp;
#if !defined(PDQ_BUS_DMA)
    const pdq_uint32_t contig_bytes = (sizeof(pdq_descriptor_block_t) * 2) - PDQ_OS_PAGESIZE;
    pdq_uint8_t *p;
#endif
    int idx;

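    /*
     * Sanity-check that the host-side structure layouts match the
     * sizes the adapter's port interface expects; a mismatch here
     * means the descriptor and response structures were mispacked
     * for this compiler/architecture.
     */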
    PDQ_ASSERT(sizeof(pdq_descriptor_block_t) == 8192);
    PDQ_ASSERT(sizeof(pdq_consumer_block_t) == 64);
    PDQ_ASSERT(sizeof(pdq_response_filter_get_t) == PDQ_SIZE_RESPONSE_FILTER_GET);
    PDQ_ASSERT(sizeof(pdq_cmd_addr_filter_set_t) == PDQ_SIZE_CMD_ADDR_FILTER_SET);
    PDQ_ASSERT(sizeof(pdq_response_addr_filter_get_t) == PDQ_SIZE_RESPONSE_ADDR_FILTER_GET);
    PDQ_ASSERT(sizeof(pdq_response_status_chars_get_t) == PDQ_SIZE_RESPONSE_STATUS_CHARS_GET);
    PDQ_ASSERT(sizeof(pdq_response_fddi_mib_get_t) == PDQ_SIZE_RESPONSE_FDDI_MIB_GET);
    PDQ_ASSERT(sizeof(pdq_response_dec_ext_mib_get_t) == PDQ_SIZE_RESPONSE_DEC_EXT_MIB_GET);
    PDQ_ASSERT(sizeof(pdq_unsolicited_event_t) == 512);

    pdq = (pdq_t *) PDQ_OS_MEMALLOC(sizeof(pdq_t));
    if (pdq == NULL) {
	PDQ_PRINTF(("malloc(%d) failed\n", sizeof(*pdq)));
	return NULL;
    }
    PDQ_OS_MEMZERO(pdq, sizeof(pdq_t));
    pdq->pdq_type = type;
    pdq->pdq_unit = unit;
    pdq->pdq_os_ctx = (void *) ctx;
    pdq->pdq_os_name = name;
    pdq->pdq_flags = PDQ_PRINTCHARS;
    /*
     * Allocate the additional data structures required by
     * the PDQ driver.  Allocate a contiguous region of memory
     * for the descriptor block.  We need to allocate enough
     * to guarantee that we get an 8KB block of memory aligned
     * on an 8KB boundary.  This turns out to require that we
     * allocate (N*2 - 1 page) pages of memory.  On machines with
     * a page size smaller than 8KB, this means we allocate more
     * memory than we need; the extra is used for the unsolicited
     * event buffers.  (On machines with 8KB pages we have to
     * allocate the event buffers separately since there is
     * nothing left over.)
     */
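    /*
     * For example, with a 4KB page size the request below is for
     * 2*8KB - 4KB = 12KB.  Since the allocation is page aligned, any
     * such 12KB region contains an 8KB block aligned on an 8KB
     * boundary, leaving at most 4KB over for the unsolicited event
     * buffers.
     */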
#if defined(PDQ_OS_MEMALLOC_CONTIG)
    p = (pdq_uint8_t *) PDQ_OS_MEMALLOC_CONTIG(contig_bytes);
    if (p != NULL) {
	pdq_physaddr_t physaddr = PDQ_OS_VA_TO_BUSPA(pdq, p);
	/*
	 * Assert that we really got contiguous memory.  This isn't really
	 * needed on systems that actually have physically contiguous
	 * allocation routines, but on those systems that don't ...
	 */
	for (idx = PDQ_OS_PAGESIZE; idx < 0x2000; idx += PDQ_OS_PAGESIZE) {
	    if (PDQ_OS_VA_TO_BUSPA(pdq, p + idx) - physaddr != idx)
		goto cleanup_and_return;
	}
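	/*
	 * If the region did not start on an 8KB boundary, the
	 * descriptor block begins at the next 8KB boundary and the
	 * leading fragment is used for the unsolicited event buffers;
	 * otherwise the descriptor block is at the start and the event
	 * buffers follow it.
	 */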
	if (physaddr & 0x1FFF) {
	    pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) p;
	    pdq->pdq_unsolicited_info.ui_pa_bufstart = physaddr;
	    pdq->pdq_dbp = (pdq_descriptor_block_t *) &p[0x2000 - (physaddr & 0x1FFF)];
	    pdq->pdq_pa_descriptor_block = physaddr & ~0x1FFFUL;
	} else {
	    pdq->pdq_dbp = (pdq_descriptor_block_t *) p;
	    pdq->pdq_pa_descriptor_block = physaddr;
	    pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) &p[0x2000];
	    pdq->pdq_unsolicited_info.ui_pa_bufstart = physaddr + 0x2000;
	}
    }
    pdq->pdq_cbp = (volatile pdq_consumer_block_t *) &pdq->pdq_dbp->pdqdb_consumer;
    pdq->pdq_pa_consumer_block = PDQ_DB_BUSPA(pdq, pdq->pdq_cbp);
    if (contig_bytes == sizeof(pdq_descriptor_block_t)) {
	pdq->pdq_unsolicited_info.ui_events =
	    (pdq_unsolicited_event_t *) PDQ_OS_MEMALLOC(
		PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
    }
#else
    if (pdq_os_memalloc_contig(pdq))
	goto cleanup_and_return;
#endif

    /*
     * Make sure everything got allocated.  If not, free what did
     * get allocated and return.
     */
    if (pdq->pdq_dbp == NULL || pdq->pdq_unsolicited_info.ui_events == NULL) {
      cleanup_and_return:
#ifdef PDQ_OS_MEMFREE_CONTIG
	if (p /* pdq->pdq_dbp */ != NULL)
	    PDQ_OS_MEMFREE_CONTIG(p /* pdq->pdq_dbp */, contig_bytes);
	if (contig_bytes == sizeof(pdq_descriptor_block_t) && pdq->pdq_unsolicited_info.ui_events != NULL)
	    PDQ_OS_MEMFREE(pdq->pdq_unsolicited_info.ui_events,
			   PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
#endif
	PDQ_OS_MEMFREE(pdq, sizeof(pdq_t));
	return NULL;
    }
    dbp = pdq->pdq_dbp;

    PDQ_PRINTF(("\nPDQ Descriptor Block = " PDQ_OS_PTR_FMT " (PA = 0x%x)\n", dbp, pdq->pdq_pa_descriptor_block));
    PDQ_PRINTF(("    Receive Queue          = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_receives));
    PDQ_PRINTF(("    Transmit Queue         = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_transmits));
    PDQ_PRINTF(("    Host SMT Queue         = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_host_smt));
    PDQ_PRINTF(("    Command Response Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_command_responses));
    PDQ_PRINTF(("    Command Request Queue  = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_command_requests));
    PDQ_PRINTF(("PDQ Consumer Block = " PDQ_OS_PTR_FMT "\n", pdq->pdq_cbp));

    /*
     * Zero out the descriptor block.  Not really required but
     * it pays to be neat.  This will also zero out the consumer
     * block, command pool, and buffer pointers for the receive
     * and host_smt rings.
     */
    PDQ_OS_MEMZERO(dbp, sizeof(*dbp));

    /*
     * Initialize the CSR references.
     * The DEFAA (FutureBus+) skips a longword between registers.
     */
    pdq_init_csrs(&pdq->pdq_csrs, bus, csr_base, pdq->pdq_type == PDQ_DEFAA ? 2 : 1);
    if (pdq->pdq_type == PDQ_DEFPA)
	pdq_init_pci_csrs(&pdq->pdq_pci_csrs, bus, csr_base, 1);

    PDQ_PRINTF(("PDQ CSRs: BASE = " PDQ_OS_CSR_FMT "\n", pdq->pdq_csrs.csr_base));
    PDQ_PRINTF(("    Port Reset                = " PDQ_OS_CSR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_port_reset, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_reset)));
    PDQ_PRINTF(("    Host Data                 = " PDQ_OS_CSR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_host_data, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_data)));
    PDQ_PRINTF(("    Port Control              = " PDQ_OS_CSR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_port_control, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_control)));
    PDQ_PRINTF(("    Port Data A               = " PDQ_OS_CSR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_port_data_a, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_a)));
    PDQ_PRINTF(("    Port Data B               = " PDQ_OS_CSR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_port_data_b, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_b)));
    PDQ_PRINTF(("    Port Status               = " PDQ_OS_CSR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_port_status, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status)));
    PDQ_PRINTF(("    Host Int Type 0           = " PDQ_OS_CSR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_host_int_type_0, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0)));
    PDQ_PRINTF(("    Host Int Enable           = " PDQ_OS_CSR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_host_int_enable, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_enable)));
    PDQ_PRINTF(("    Type 2 Producer           = " PDQ_OS_CSR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_type_2_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_type_2_producer)));
    PDQ_PRINTF(("    Command Response Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_cmd_response_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_response_producer)));
    PDQ_PRINTF(("    Command Request Producer  = " PDQ_OS_CSR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_cmd_request_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_request_producer)));
    PDQ_PRINTF(("    Host SMT Producer         = " PDQ_OS_CSR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_host_smt_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_smt_producer)));
    PDQ_PRINTF(("    Unsolicited Producer      = " PDQ_OS_CSR_FMT " [0x%08x]\n",
	   pdq->pdq_csrs.csr_unsolicited_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_unsolicited_producer)));

    /*
     * Initialize the command information block
     */
    pdq->pdq_command_info.ci_request_bufstart = dbp->pdqdb_cmd_request_buf;
    pdq->pdq_command_info.ci_pa_request_bufstart = PDQ_DB_BUSPA(pdq, pdq->pdq_command_info.ci_request_bufstart);
    pdq->pdq_command_info.ci_pa_request_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_command_requests);
    PDQ_PRINTF(("PDQ Command Request Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
		pdq->pdq_command_info.ci_request_bufstart,
		pdq->pdq_command_info.ci_pa_request_bufstart));
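    /*
     * Every command request descriptor points at the single shared
     * request buffer (and, below, every response descriptor at the
     * shared response buffer); the driver issues at most one command
     * at a time, so one buffer per direction suffices and the
     * descriptors can be filled in once here.
     */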
    for (idx = 0; idx < sizeof(dbp->pdqdb_command_requests)/sizeof(dbp->pdqdb_command_requests[0]); idx++) {
	pdq_txdesc_t *txd = &dbp->pdqdb_command_requests[idx];

	txd->txd_pa_lo = htole32(pdq->pdq_command_info.ci_pa_request_bufstart);
	txd->txd_pa_hi = htole32(PDQ_TXDESC_SOP | PDQ_TXDESC_EOP);
    }
    PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_command_requests,
			sizeof(dbp->pdqdb_command_requests));

    pdq->pdq_command_info.ci_response_bufstart = dbp->pdqdb_cmd_response_buf;
    pdq->pdq_command_info.ci_pa_response_bufstart = PDQ_DB_BUSPA(pdq, pdq->pdq_command_info.ci_response_bufstart);
    pdq->pdq_command_info.ci_pa_response_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_command_responses);
    PDQ_PRINTF(("PDQ Command Response Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
		pdq->pdq_command_info.ci_response_bufstart,
		pdq->pdq_command_info.ci_pa_response_bufstart));
    for (idx = 0; idx < sizeof(dbp->pdqdb_command_responses)/sizeof(dbp->pdqdb_command_responses[0]); idx++) {
	pdq_rxdesc_t *rxd = &dbp->pdqdb_command_responses[idx];

	rxd->rxd_pa_hi = htole32(PDQ_RXDESC_SOP |
	    PDQ_RXDESC_SEG_LEN(PDQ_SIZE_COMMAND_RESPONSE));
	rxd->rxd_pa_lo = htole32(pdq->pdq_command_info.ci_pa_response_bufstart);
    }
    PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_command_responses,
			sizeof(dbp->pdqdb_command_responses));

    /*
     * Initialize the unsolicited event information block
     */
    pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
    pdq->pdq_unsolicited_info.ui_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_unsolicited_events);
    PDQ_PRINTF(("PDQ Unsolicited Event Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
		pdq->pdq_unsolicited_info.ui_events,
		pdq->pdq_unsolicited_info.ui_pa_bufstart));
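    /*
     * The unsolicited event descriptor ring can be larger than the
     * number of event buffers, so descriptors are mapped onto the
     * buffers modulo PDQ_NUM_UNSOLICITED_EVENTS.
     */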
    for (idx = 0; idx < sizeof(dbp->pdqdb_unsolicited_events)/sizeof(dbp->pdqdb_unsolicited_events[0]); idx++) {
	pdq_rxdesc_t *rxd = &dbp->pdqdb_unsolicited_events[idx];
	pdq_unsolicited_event_t *event = &pdq->pdq_unsolicited_info.ui_events[idx & (PDQ_NUM_UNSOLICITED_EVENTS-1)];

	rxd->rxd_pa_hi = htole32(PDQ_RXDESC_SOP |
		PDQ_RXDESC_SEG_LEN(sizeof(pdq_unsolicited_event_t)));
	rxd->rxd_pa_lo = htole32(pdq->pdq_unsolicited_info.ui_pa_bufstart + (const pdq_uint8_t *) event
	    - (const pdq_uint8_t *) pdq->pdq_unsolicited_info.ui_events);
	PDQ_OS_UNSOL_EVENT_PRESYNC(pdq, event);
    }
    PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_unsolicited_events,
			sizeof(dbp->pdqdb_unsolicited_events));

    /*
     * Initialize the receive information blocks (normal and SMT).
     */
    pdq->pdq_rx_info.rx_buffers = pdq->pdq_receive_buffers;
    pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(dbp->pdqdb_receives);
    pdq->pdq_rx_info.rx_target = pdq->pdq_rx_info.rx_free - PDQ_RX_SEGCNT * 8;
    pdq->pdq_rx_info.rx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_receives);

    pdq->pdq_host_smt_info.rx_buffers = pdq->pdq_host_smt_buffers;
    pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(dbp->pdqdb_host_smt);
    pdq->pdq_host_smt_info.rx_target = pdq->pdq_host_smt_info.rx_free - PDQ_RX_SEGCNT * 3;
    pdq->pdq_host_smt_info.rx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_host_smt);

    /*
     * Initialize the transmit information block.
     */
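    /*
     * pdqdb_tx_hdr holds the three packet request header bytes the
     * adapter expects in front of each transmitted frame; tx_hdrdesc
     * describes it as the SOP segment so it can be chained ahead of
     * every outgoing packet's data segments.
     */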
    dbp->pdqdb_tx_hdr[0] = PDQ_FDDI_PH0;
    dbp->pdqdb_tx_hdr[1] = PDQ_FDDI_PH1;
    dbp->pdqdb_tx_hdr[2] = PDQ_FDDI_PH2;
    pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(dbp->pdqdb_transmits);
    pdq->pdq_tx_info.tx_hdrdesc.txd_pa_hi = htole32(PDQ_TXDESC_SOP|PDQ_TXDESC_SEG_LEN(3));
    pdq->pdq_tx_info.tx_hdrdesc.txd_pa_lo = htole32(PDQ_DB_BUSPA(pdq, dbp->pdqdb_tx_hdr));
    pdq->pdq_tx_info.tx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_transmits);

    state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
    PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));

    /*
     * Stop the PDQ if it is running and put it into a known state.
     */
    state = pdq_stop(pdq);

    PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
    PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
    /*
     * If the adapter is not in the state we expect, then the
     * initialization failed.  Clean up and exit.
     */
#if defined(PDQVERBOSE)
    if (state == PDQS_HALTED) {
	pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
	printf("Halt code = %d (%s)\n", halt_code, pdq_halt_codes[halt_code]);
	if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA)
	    PDQ_PRINTF(("PFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
		       PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
		       PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0) & PDQ_HOST_INT_FATAL_ERROR));
    }
#endif
    if (state == PDQS_RESET || state == PDQS_HALTED || state == PDQS_UPGRADE)
	goto cleanup_and_return;

    PDQ_PRINTF(("PDQ Hardware Address = %02x-%02x-%02x-%02x-%02x-%02x\n",
	   pdq->pdq_hwaddr.lanaddr_bytes[0], pdq->pdq_hwaddr.lanaddr_bytes[1],
	   pdq->pdq_hwaddr.lanaddr_bytes[2], pdq->pdq_hwaddr.lanaddr_bytes[3],
	   pdq->pdq_hwaddr.lanaddr_bytes[4], pdq->pdq_hwaddr.lanaddr_bytes[5]));
    PDQ_PRINTF(("PDQ Firmware Revision = %c%c%c%c\n",
	   pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
	   pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3]));
    PDQ_PRINTF(("PDQ Chip Revision = "));
    switch (pdq->pdq_chip_rev) {
	case PDQ_CHIP_REV_A_B_OR_C: PDQ_PRINTF(("Rev C or below")); break;
	case PDQ_CHIP_REV_D: PDQ_PRINTF(("Rev D")); break;
	case PDQ_CHIP_REV_E: PDQ_PRINTF(("Rev E")); break;
	default: PDQ_PRINTF(("Unknown Rev %d", (int) pdq->pdq_chip_rev));
    }
    PDQ_PRINTF(("\n"));

    return pdq;
}
