/*-
 * Copyright (c) 2009 Yahoo! Inc.
 * Copyright (c) 2011-2015 LSI Corp.
 * Copyright (c) 2013-2016 Avago Technologies
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
 *
 * $FreeBSD: head/sys/dev/mpr/mpr.c 330789 2018-03-12 05:02:22Z scottl $
 */

/* Communications core for Avago Technologies (LSI) MPT3 */

/* TODO Move headers to mprvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#include <sys/sbuf.h>
#include <sys/caps.h>

#include <sys/rman.h>
#include <sys/proc.h>

#include <bus/pci/pcivar.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/scsi/scsi_all.h>

#include <dev/raid/mpr/mpi/mpi2_type.h>
#include <dev/raid/mpr/mpi/mpi2.h>
#include <dev/raid/mpr/mpi/mpi2_ioc.h>
#include <dev/raid/mpr/mpi/mpi2_sas.h>
#include <dev/raid/mpr/mpi/mpi2_pci.h>
#include <dev/raid/mpr/mpi/mpi2_cnfg.h>
#include <dev/raid/mpr/mpi/mpi2_init.h>
#include <dev/raid/mpr/mpi/mpi2_tool.h>
#include <dev/raid/mpr/mpr_ioctl.h>
#include <dev/raid/mpr/mprvar.h>
#include <dev/raid/mpr/mpr_table.h>
#include <dev/raid/mpr/mpr_sas.h>

static int mpr_diag_reset(struct mpr_softc *sc, int sleep_flag);
static int mpr_init_queues(struct mpr_softc *sc);
static void mpr_resize_queues(struct mpr_softc *sc);
static int mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag);
static int mpr_transition_operational(struct mpr_softc *sc);
static int mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching);
static void mpr_iocfacts_free(struct mpr_softc *sc);
static void mpr_startup(void *arg);
static int mpr_send_iocinit(struct mpr_softc *sc);
static int mpr_alloc_queues(struct mpr_softc *sc);
static int mpr_alloc_hw_queues(struct mpr_softc *sc);
static int mpr_alloc_replies(struct mpr_softc *sc);
static int mpr_alloc_requests(struct mpr_softc *sc);
static int mpr_alloc_nvme_prp_pages(struct mpr_softc *sc);
static int mpr_attach_log(struct mpr_softc *sc);
static __inline void mpr_complete_command(struct mpr_softc *sc,
    struct mpr_command *cm);
static void mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data,
    MPI2_EVENT_NOTIFICATION_REPLY *reply);
static void mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm);
static void mpr_periodic(void *);
static int mpr_reregister_events(struct mpr_softc *sc);
static void mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm);
static int mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts);
static int mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag);
static int mpr_dump_reqs(SYSCTL_HANDLER_ARGS);

SYSCTL_NODE(_hw, OID_AUTO, mpr, CTLFLAG_RD, 0, "MPR Driver Parameters");

MALLOC_DEFINE(M_MPR, "mpr", "mpr driver memory");

/*
 * Do a "Diagnostic Reset" aka a hard reset. This should get the chip out of
 * any state and back to its initialization state machine.
 */
static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d };
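/*
 * A note on the values above (an assumption based on the MPI2 headers, not
 * something this file spells out): they appear to match the MPI2_WRSEQ_*
 * key values (FLUSH, then 1ST through 6TH) that mpi2.h defines for
 * unlocking the host diagnostic register; keeping them in an array is just
 * a compact way to clock them out in order, as mpr_diag_reset() does below.
 */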

/*
 * This union is used to convert cm->cm_desc.Words with le64toh() cleanly.
 * The compiler only supports a uint64_t being passed as the argument;
 * otherwise it will throw this error:
 * "aggregate value used where an integer was expected"
 */
typedef union _reply_descriptor {
        u64 word;
        struct {
                u32 low;
                u32 high;
        } u;
} reply_descriptor, request_descriptor;
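/*
 * A minimal usage sketch (this mirrors the non-atomic descriptor path in
 * mpr_enqueue_request() below):
 *
 *      request_descriptor rd;
 *
 *      rd.u.low = cm->cm_desc.Words.Low;
 *      rd.u.high = cm->cm_desc.Words.High;
 *      rd.word = htole64(rd.word);
 */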

/* Rate limit chain-fail messages to 1 per minute */
static struct timeval mpr_chainfail_interval = { 60, 0 };

/*
 * sleep_flag can be either CAN_SLEEP or NO_SLEEP.
 * If this function is called from process context, it can sleep and there
 * is no harm in sleeping.  If this function is called from an interrupt
 * handler, we cannot sleep and the NO_SLEEP flag must be set.
 * Based on the sleep flag, the driver will call either msleep, pause, or
 * DELAY.  msleep and pause are variants of the same mechanism, but pause
 * is used when mpr_mtx is not held by the driver.
 */
static int
mpr_diag_reset(struct mpr_softc *sc, int sleep_flag)
{
        uint32_t reg;
        int i, error, tries = 0;
        uint8_t first_wait_done = FALSE;

        mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

        /* Clear any pending interrupts */
        mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

        mpr_dprint(sc, MPR_INIT, "sequence start, sleep_flag=%d\n", sleep_flag);
        /* Push the magic sequence */
        error = ETIMEDOUT;
        while (tries++ < 20) {
                for (i = 0; i < sizeof(mpt2_reset_magic); i++)
                        mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET,
                            mpt2_reset_magic[i]);

                /* wait 100 msec */
                if (lockowned(&sc->mpr_lock) && sleep_flag == CAN_SLEEP)
                        lksleep(&sc->msleep_fake_chan, &sc->mpr_lock, 0,
                            "mprdiag", hz < 10 ? 1 : hz / 10);
                else if (sleep_flag == CAN_SLEEP)
                        tsleep(mpr_diag_reset, 0, "mprdiag",
                            hz < 10 ? 1 : hz / 10);
                else
                        DELAY(100 * 1000);

                reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
                if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) {
                        error = 0;
                        break;
                }
        }
        if (error) {
                mpr_dprint(sc, MPR_INIT, "sequence failed, error=%d, exit\n",
                    error);
                return (error);
        }

        /* Send the actual reset. XXX need to refresh the reg? */
        reg |= MPI2_DIAG_RESET_ADAPTER;
        mpr_dprint(sc, MPR_INIT, "sequence success, sending reset, reg= 0x%x\n",
            reg);
        mpr_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET, reg);

        /* Wait up to 300 seconds in 50ms intervals */
        error = ETIMEDOUT;
        for (i = 0; i < 6000; i++) {
                /*
                 * Wait 50 msec. If this is the first time through, wait 256
                 * msec to satisfy Diag Reset timing requirements.
                 */
                if (first_wait_done) {
                        if (lockowned(&sc->mpr_lock) && sleep_flag == CAN_SLEEP)
                                lksleep(&sc->msleep_fake_chan, &sc->mpr_lock, 0,
                                    "mprdiag", hz < 20 ? 1 : hz / 20);
                        else if (sleep_flag == CAN_SLEEP)
                                tsleep(mpr_diag_reset, 0, "mprdiag",
                                    hz < 20 ? 1 : hz / 20);
                        else
                                DELAY(50 * 1000);
                } else {
                        DELAY(256 * 1000);
                        first_wait_done = TRUE;
                }
                /*
                 * Check for the RESET_ADAPTER bit to be cleared first, then
                 * wait for the RESET state to be cleared, which takes a little
                 * longer.
                 */
                reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
                if (reg & MPI2_DIAG_RESET_ADAPTER) {
                        continue;
                }
                reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
                if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) {
                        error = 0;
                        break;
                }
        }
        if (error) {
                mpr_dprint(sc, MPR_INIT, "reset failed, error= %d, exit\n",
                    error);
                return (error);
        }

        mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0);
        mpr_dprint(sc, MPR_INIT, "diag reset success, exit\n");

        return (0);
}

static int
mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag)
{
        int error;

        MPR_FUNCTRACE(sc);

        mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

        error = 0;
        mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
            MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET <<
            MPI2_DOORBELL_FUNCTION_SHIFT);

        if (mpr_wait_db_ack(sc, 5, sleep_flag) != 0) {
                mpr_dprint(sc, MPR_INIT|MPR_FAULT,
                    "Doorbell handshake failed\n");
                error = ETIMEDOUT;
        }

        mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
        return (error);
}

static int
mpr_transition_ready(struct mpr_softc *sc)
{
        uint32_t reg, state;
        int error, tries = 0;
        int sleep_flags;

        MPR_FUNCTRACE(sc);
        /* If we are in attach call, do not sleep */
        sleep_flags = (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE)
            ? CAN_SLEEP : NO_SLEEP;

        error = 0;

        mpr_dprint(sc, MPR_INIT, "%s entered, sleep_flags= %d\n",
            __func__, sleep_flags);

        while (tries++ < 1200) {
                reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
                mpr_dprint(sc, MPR_INIT, " Doorbell= 0x%x\n", reg);

                /*
                 * Ensure the IOC is ready to talk. If it's not, try
                 * resetting it.
                 */
                if (reg & MPI2_DOORBELL_USED) {
                        mpr_dprint(sc, MPR_INIT, " Not ready, sending diag "
                            "reset\n");
                        mpr_diag_reset(sc, sleep_flags);
                        DELAY(50000);
                        continue;
                }

                /* Is the adapter owned by another peer? */
                if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) ==
                    (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) {
                        mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC is under the "
                            "control of another peer host, aborting "
                            "initialization.\n");
                        error = ENXIO;
                        break;
                }

                state = reg & MPI2_IOC_STATE_MASK;
                if (state == MPI2_IOC_STATE_READY) {
                        /* Ready to go! */
                        error = 0;
                        break;
                } else if (state == MPI2_IOC_STATE_FAULT) {
                        mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC in fault "
                            "state 0x%x, resetting\n",
                            state & MPI2_DOORBELL_FAULT_CODE_MASK);
                        mpr_diag_reset(sc, sleep_flags);
                } else if (state == MPI2_IOC_STATE_OPERATIONAL) {
                        /* Need to take ownership */
                        mpr_message_unit_reset(sc, sleep_flags);
                } else if (state == MPI2_IOC_STATE_RESET) {
                        /* Wait a bit, IOC might be in transition */
                        mpr_dprint(sc, MPR_INIT|MPR_FAULT,
                            "IOC in unexpected reset state\n");
                } else {
                        mpr_dprint(sc, MPR_INIT|MPR_FAULT,
                            "IOC in unknown state 0x%x\n", state);
                        error = EINVAL;
                        break;
                }

                /* Wait 50ms for things to settle down. */
                DELAY(50000);
        }

        if (error)
                mpr_dprint(sc, MPR_INIT|MPR_FAULT,
                    "Cannot transition IOC to ready\n");
        mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
        return (error);
}

static int
mpr_transition_operational(struct mpr_softc *sc)
{
        uint32_t reg, state;
        int error;

        MPR_FUNCTRACE(sc);

        error = 0;
        reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
        mpr_dprint(sc, MPR_INIT, "%s entered, Doorbell= 0x%x\n", __func__, reg);

        state = reg & MPI2_IOC_STATE_MASK;
        if (state != MPI2_IOC_STATE_READY) {
                mpr_dprint(sc, MPR_INIT, "IOC not ready\n");
                if ((error = mpr_transition_ready(sc)) != 0) {
                        mpr_dprint(sc, MPR_INIT|MPR_FAULT,
                            "failed to transition ready, exit\n");
                        return (error);
                }
        }

        error = mpr_send_iocinit(sc);
        mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);

        return (error);
}

static void
mpr_resize_queues(struct mpr_softc *sc)
{
        u_int reqcr, prireqcr, maxio, sges_per_frame, chain_seg_size;

        /*
         * Size the queues. Since the reply queues always need one free
         * entry, we'll deduct one reply message here. The LSI documents
         * suggest instead adding a count to the request queue, but I think
         * it's better to deduct from the reply queue.
         */
        prireqcr = MAX(1, sc->max_prireqframes);
        prireqcr = MIN(prireqcr, sc->facts->HighPriorityCredit);

        reqcr = MAX(2, sc->max_reqframes);
        reqcr = MIN(reqcr, sc->facts->RequestCredit);

        sc->num_reqs = prireqcr + reqcr;
        sc->num_prireqs = prireqcr;
        sc->num_replies = MIN(sc->max_replyframes + sc->max_evtframes,
            sc->facts->MaxReplyDescriptorPostQueueDepth) - 1;

        /* Store the request frame size in bytes rather than as 32bit words */
        sc->reqframesz = sc->facts->IOCRequestFrameSize * 4;

        /*
         * Gen3 and beyond uses the IOCMaxChainSegmentSize from IOC Facts to
         * get the size of a Chain Frame. Previous versions use the Request
         * Frame size as the Chain Frame size. If IOCMaxChainSegmentSize is 0,
         * use the default value. The IOCMaxChainSegmentSize is the number of
         * 16-byte elements that can fit in a Chain Frame, where 16 bytes is
         * the size of an IEEE Simple SGE.
         */
        if (sc->facts->MsgVersion >= MPI2_VERSION_02_05) {
                chain_seg_size = htole16(sc->facts->IOCMaxChainSegmentSize);
                if (chain_seg_size == 0)
                        chain_seg_size = MPR_DEFAULT_CHAIN_SEG_SIZE;
                sc->chain_frame_size = chain_seg_size *
                    MPR_MAX_CHAIN_ELEMENT_SIZE;
        } else {
                sc->chain_frame_size = sc->reqframesz;
        }

        /*
         * Max IO Size is Page Size * the following:
         * ((SGEs per frame - 1 for chain element) * Max Chain Depth)
         * + 1 for no chain needed in last frame
         *
         * If the user suggests a Max IO size to use, use the smaller of the
         * user's value and the calculated value as long as the user's
         * value is larger than 0. The user's value is in pages.
         */
        sges_per_frame = sc->chain_frame_size/sizeof(MPI2_IEEE_SGE_SIMPLE64)-1;
        maxio = (sges_per_frame * sc->facts->MaxChainDepth + 1) * PAGE_SIZE;
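        /*
         * Worked example with illustrative numbers only: a 128-byte chain
         * frame holds 128 / 16 - 1 = 7 simple SGEs after reserving one slot
         * for the chain element.  If the IOC reported MaxChainDepth = 128
         * and pages are 4k, maxio = (7 * 128 + 1) * 4096, roughly 3.5MB.
         */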

        /*
         * If an I/O size limitation is requested then use it and pass it up
         * to CAM. If not, use MAXPHYS as an optimization hint, but report
         * the HW limit.
         */
        if (sc->max_io_pages > 0) {
                maxio = min(maxio, sc->max_io_pages * PAGE_SIZE);
                sc->maxio = maxio;
        } else {
                sc->maxio = maxio;
                maxio = min(maxio, MAXPHYS);
        }

        sc->num_chains = (maxio / PAGE_SIZE + sges_per_frame - 2) /
            sges_per_frame * reqcr;
        if (sc->max_chains > 0 && sc->max_chains < sc->num_chains)
                sc->num_chains = sc->max_chains;

        /*
         * Figure out the number of MSIx-based queues. If the firmware or
         * user has done something crazy and not allowed enough credit for
         * the queues to be useful then don't enable multi-queue.
         */
        if (sc->facts->MaxMSIxVectors < 2)
                sc->msi_msgs = 1;

        if (sc->msi_msgs > 1) {
                sc->msi_msgs = MIN(sc->msi_msgs, ncpus);
                sc->msi_msgs = MIN(sc->msi_msgs, sc->facts->MaxMSIxVectors);
                if (sc->num_reqs / sc->msi_msgs < 2)
                        sc->msi_msgs = 1;
        }

        mpr_dprint(sc, MPR_INIT, "Sized queues to q=%d reqs=%d replies=%d\n",
            sc->msi_msgs, sc->num_reqs, sc->num_replies);
}

/*
 * This is called during attach and when re-initializing due to a Diag Reset.
 * IOC Facts is used to allocate many of the structures needed by the driver.
 * If called from attach, de-allocation is not required because the driver has
 * not allocated any structures yet, but if called from a Diag Reset, previously
 * allocated structures based on IOC Facts will need to be freed and
 * reallocated based on the latest IOC Facts.
 */
static int
mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching)
{
        int error;
        Mpi2IOCFactsReply_t saved_facts;
        uint8_t saved_mode, reallocating;

        mpr_dprint(sc, MPR_INIT|MPR_TRACE, "%s entered\n", __func__);

        /* Save old IOC Facts and then only reallocate if Facts have changed */
        if (!attaching) {
                bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY));
        }

        /*
         * Get IOC Facts. In all cases throughout this function, panic if doing
         * a re-initialization and only return the error if attaching so the OS
         * can handle it.
         */
        if ((error = mpr_get_iocfacts(sc, sc->facts)) != 0) {
                if (attaching) {
                        mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to get "
                            "IOC Facts with error %d, exit\n", error);
                        return (error);
                } else {
                        panic("%s failed to get IOC Facts with error %d\n",
                            __func__, error);
                }
        }

        MPR_DPRINT_PAGE(sc, MPR_XINFO, iocfacts, sc->facts);

        ksnprintf(sc->fw_version, sizeof(sc->fw_version),
            "%02d.%02d.%02d.%02d",
            sc->facts->FWVersion.Struct.Major,
            sc->facts->FWVersion.Struct.Minor,
            sc->facts->FWVersion.Struct.Unit,
            sc->facts->FWVersion.Struct.Dev);

        mpr_dprint(sc, MPR_INFO, "Firmware: %s, Driver: %s\n", sc->fw_version,
            MPR_DRIVER_VERSION);
        mpr_dprint(sc, MPR_INFO,
            "IOCCapabilities: %pb%i\n",
            "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf"
            "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR"
            "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc"
            "\22FastPath" "\23RDPQArray" "\24AtomicReqDesc" "\25PCIeSRIOV",
            sc->facts->IOCCapabilities);

        /*
         * If the chip doesn't support event replay then a hard reset will be
         * required to trigger a full discovery. Do the reset here then
         * retransition to Ready. A hard reset might have already been done,
         * but it doesn't hurt to do it again. Only do this if attaching, not
         * for a Diag Reset.
         */
        if (attaching && ((sc->facts->IOCCapabilities &
            MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0)) {
                mpr_dprint(sc, MPR_INIT, "No event replay, resetting\n");
                mpr_diag_reset(sc, NO_SLEEP);
                if ((error = mpr_transition_ready(sc)) != 0) {
                        mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to "
                            "transition to ready with error %d, exit\n",
                            error);
                        return (error);
                }
        }

        /*
         * Set flag if IR Firmware is loaded. If the RAID Capability has
         * changed from the previous IOC Facts, log a warning, but only if
         * checking this after a Diag Reset and not during attach.
         */
        saved_mode = sc->ir_firmware;
        if (sc->facts->IOCCapabilities &
            MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)
                sc->ir_firmware = 1;
        if (!attaching) {
                if (sc->ir_firmware != saved_mode) {
                        mpr_dprint(sc, MPR_INIT|MPR_FAULT, "new IR/IT mode "
                            "in IOC Facts does not match previous mode\n");
                }
        }

        /* Only deallocate and reallocate if relevant IOC Facts have changed */
        reallocating = FALSE;
        sc->mpr_flags &= ~MPR_FLAGS_REALLOCATED;

        if ((!attaching) &&
            ((saved_facts.MsgVersion != sc->facts->MsgVersion) ||
            (saved_facts.HeaderVersion != sc->facts->HeaderVersion) ||
            (saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) ||
            (saved_facts.RequestCredit != sc->facts->RequestCredit) ||
            (saved_facts.ProductID != sc->facts->ProductID) ||
            (saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) ||
            (saved_facts.IOCRequestFrameSize !=
            sc->facts->IOCRequestFrameSize) ||
            (saved_facts.IOCMaxChainSegmentSize !=
            sc->facts->IOCMaxChainSegmentSize) ||
            (saved_facts.MaxTargets != sc->facts->MaxTargets) ||
            (saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) ||
            (saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) ||
            (saved_facts.HighPriorityCredit != sc->facts->HighPriorityCredit) ||
            (saved_facts.MaxReplyDescriptorPostQueueDepth !=
            sc->facts->MaxReplyDescriptorPostQueueDepth) ||
            (saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) ||
            (saved_facts.MaxVolumes != sc->facts->MaxVolumes) ||
            (saved_facts.MaxPersistentEntries !=
            sc->facts->MaxPersistentEntries))) {
                reallocating = TRUE;

                /* Record that we reallocated everything */
                sc->mpr_flags |= MPR_FLAGS_REALLOCATED;
        }

        /*
         * Some things should be done if attaching or re-allocating after a Diag
         * Reset, but are not needed after a Diag Reset if the FW has not
         * changed.
         */
        if (attaching || reallocating) {
                /*
                 * Check if controller supports FW diag buffers and set flag to
                 * enable each type.
                 */
                if (sc->facts->IOCCapabilities &
                    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
                        sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE].
                            enabled = TRUE;
                if (sc->facts->IOCCapabilities &
                    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
                        sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT].
                            enabled = TRUE;
                if (sc->facts->IOCCapabilities &
                    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
                        sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED].
                            enabled = TRUE;

                /*
                 * Set flags for some supported items.
                 */
                if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP)
                        sc->eedp_enabled = TRUE;
                if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)
                        sc->control_TLR = TRUE;
                if (sc->facts->IOCCapabilities &
                    MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
                        sc->atomic_desc_capable = TRUE;

                mpr_resize_queues(sc);

                /*
                 * Initialize all Tail Queues
                 */
                TAILQ_INIT(&sc->req_list);
                TAILQ_INIT(&sc->high_priority_req_list);
                TAILQ_INIT(&sc->chain_list);
                TAILQ_INIT(&sc->prp_page_list);
                TAILQ_INIT(&sc->tm_list);
        }

        /*
         * If doing a Diag Reset and the FW is significantly different
         * (reallocating will be set above in IOC Facts comparison), then all
         * buffers based on the IOC Facts will need to be freed before they are
         * reallocated.
         */
        if (reallocating) {
                mpr_iocfacts_free(sc);
                mprsas_realloc_targets(sc, saved_facts.MaxTargets +
                    saved_facts.MaxVolumes);
        }

        /*
         * Any deallocation has been completed. Now start reallocating
         * if needed. Will only need to reallocate if attaching or if the new
         * IOC Facts are different from the previous IOC Facts after a Diag
         * Reset. Targets have already been allocated above if needed.
         */
        error = 0;
        while (attaching || reallocating) {
                if ((error = mpr_alloc_hw_queues(sc)) != 0)
                        break;
                if ((error = mpr_alloc_replies(sc)) != 0)
                        break;
                if ((error = mpr_alloc_requests(sc)) != 0)
                        break;
                if ((error = mpr_alloc_queues(sc)) != 0)
                        break;
                break;
        }
        if (error) {
                mpr_dprint(sc, MPR_INIT|MPR_ERROR,
                    "Failed to alloc queues with error %d\n", error);
                mpr_free(sc);
                return (error);
        }

        /* Always initialize the queues */
        bzero(sc->free_queue, sc->fqdepth * 4);
        mpr_init_queues(sc);

        /*
         * Always get the chip out of the reset state, but only panic if not
         * attaching. If attaching and there is an error, that is handled by
         * the OS.
         */
        error = mpr_transition_operational(sc);
        if (error != 0) {
                mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to "
                    "transition to operational with error %d\n", error);
                mpr_free(sc);
                return (error);
        }

        /*
         * Finish the queue initialization.
         * These are set here instead of in mpr_init_queues() because the
         * IOC resets these values during the state transition in
         * mpr_transition_operational(). The free index is set to 1
         * because the corresponding index in the IOC is set to 0, and the
         * IOC treats the queues as full if both are set to the same value.
         * Hence the reason that the queue can't hold all of the possible
         * replies.
         */
        sc->replypostindex = 0;
        mpr_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
        mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0);

        /*
         * Attach the subsystems so they can prepare their event masks.
         * XXX Should be dynamic so that IM/IR and user modules can attach
         */
        error = 0;
        while (attaching) {
                mpr_dprint(sc, MPR_INIT, "Attaching subsystems\n");
                if ((error = mpr_attach_log(sc)) != 0)
                        break;
                if ((error = mpr_attach_sas(sc)) != 0)
                        break;
                if ((error = mpr_attach_user(sc)) != 0)
                        break;
                break;
        }
        if (error) {
                mpr_dprint(sc, MPR_INIT|MPR_ERROR,
                    "Failed to attach all subsystems: error %d\n", error);
                mpr_free(sc);
                return (error);
        }

        /*
         * XXX If the number of MSI-X vectors changes during re-init, this
         * won't see it and adjust.
         */
        if (attaching && (error = mpr_pci_setup_interrupts(sc)) != 0) {
                mpr_dprint(sc, MPR_INIT|MPR_ERROR,
                    "Failed to setup interrupts\n");
                mpr_free(sc);
                return (error);
        }

        return (error);
}

/*
 * This is called when memory is being freed (during detach, for example) and
 * when buffers need to be reallocated due to a Diag Reset.
 */
static void
mpr_iocfacts_free(struct mpr_softc *sc)
{
        struct mpr_command *cm;
        int i;

        mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);

        if (sc->free_busaddr != 0)
                bus_dmamap_unload(sc->queues_dmat, sc->queues_map);
        if (sc->free_queue != NULL)
                bus_dmamem_free(sc->queues_dmat, sc->free_queue,
                    sc->queues_map);
        if (sc->queues_dmat != NULL)
                bus_dma_tag_destroy(sc->queues_dmat);

        if (sc->chain_frames != NULL) {
                bus_dmamap_unload(sc->chain_dmat, sc->chain_map);
                bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
                    sc->chain_map);
        }
        if (sc->chain_dmat != NULL)
                bus_dma_tag_destroy(sc->chain_dmat);

        if (sc->sense_busaddr != 0)
                bus_dmamap_unload(sc->sense_dmat, sc->sense_map);
        if (sc->sense_frames != NULL)
                bus_dmamem_free(sc->sense_dmat, sc->sense_frames,
                    sc->sense_map);
        if (sc->sense_dmat != NULL)
                bus_dma_tag_destroy(sc->sense_dmat);

        if (sc->prp_page_busaddr != 0)
                bus_dmamap_unload(sc->prp_page_dmat, sc->prp_page_map);
        if (sc->prp_pages != NULL)
                bus_dmamem_free(sc->prp_page_dmat, sc->prp_pages,
                    sc->prp_page_map);
        if (sc->prp_page_dmat != NULL)
                bus_dma_tag_destroy(sc->prp_page_dmat);

        if (sc->reply_busaddr != 0)
                bus_dmamap_unload(sc->reply_dmat, sc->reply_map);
        if (sc->reply_frames != NULL)
                bus_dmamem_free(sc->reply_dmat, sc->reply_frames,
                    sc->reply_map);
        if (sc->reply_dmat != NULL)
                bus_dma_tag_destroy(sc->reply_dmat);

        if (sc->req_busaddr != 0)
                bus_dmamap_unload(sc->req_dmat, sc->req_map);
        if (sc->req_frames != NULL)
                bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map);
        if (sc->req_dmat != NULL)
                bus_dma_tag_destroy(sc->req_dmat);

        if (sc->chains != NULL)
                kfree(sc->chains, M_MPR);
        if (sc->prps != NULL)
                kfree(sc->prps, M_MPR);
        if (sc->commands != NULL) {
                for (i = 1; i < sc->num_reqs; i++) {
                        cm = &sc->commands[i];
                        bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap);
                }
                kfree(sc->commands, M_MPR);
        }
        if (sc->buffer_dmat != NULL)
                bus_dma_tag_destroy(sc->buffer_dmat);

        mpr_pci_free_interrupts(sc);
        kfree(sc->queues, M_MPR);
        sc->queues = NULL;
}

/*
 * The terms diag reset and hard reset are used interchangeably in the MPI
 * docs to mean resetting the controller chip. In this code diag reset
 * cleans everything up, and the hard reset function just sends the reset
 * sequence to the chip. This should probably be refactored so that every
 * subsystem gets a reset notification of some sort, and can clean up
 * appropriately.
 */
int
mpr_reinit(struct mpr_softc *sc)
{
        int error;
        struct mprsas_softc *sassc;

        sassc = sc->sassc;

        MPR_FUNCTRACE(sc);

        KKASSERT(lockowned(&sc->mpr_lock));

        mpr_dprint(sc, MPR_INIT|MPR_INFO, "Reinitializing controller\n");
        if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) {
                mpr_dprint(sc, MPR_INIT, "Reset already in progress\n");
                return 0;
        }

        /*
         * Make sure the completion callbacks can recognize they're getting
         * a NULL cm_reply due to a reset.
         */
        sc->mpr_flags |= MPR_FLAGS_DIAGRESET;

        /*
         * Mask interrupts here.
         */
        mpr_dprint(sc, MPR_INIT, "Masking interrupts and resetting\n");
        mpr_mask_intr(sc);

        error = mpr_diag_reset(sc, CAN_SLEEP);
        if (error != 0) {
                panic("%s hard reset failed with error %d\n", __func__, error);
        }

        /* Restore the PCI state, including the MSI-X registers */
        mpr_pci_restore(sc);

        /* Give the I/O subsystem special priority to get itself prepared */
        mprsas_handle_reinit(sc);

        /*
         * Get IOC Facts and allocate all structures based on this information.
         * The attach function will also call mpr_iocfacts_allocate at startup.
         * If relevant values have changed in IOC Facts, this function will free
         * all of the memory based on IOC Facts and reallocate that memory.
         */
        if ((error = mpr_iocfacts_allocate(sc, FALSE)) != 0) {
                panic("%s IOC Facts based allocation failed with error %d\n",
                    __func__, error);
        }

        /*
         * Mapping structures will be re-allocated after getting IOC Page8, so
         * free these structures here.
         */
        mpr_mapping_exit(sc);

        /*
         * The static page function currently read is IOC Page8. Others can be
         * added in future. It's possible that the values in IOC Page8 have
         * changed after a Diag Reset due to user modification, so always read
         * these. Interrupts are masked, so unmask them before getting config
         * pages.
         */
        mpr_unmask_intr(sc);
        sc->mpr_flags &= ~MPR_FLAGS_DIAGRESET;
        mpr_base_static_config_pages(sc);

        /*
         * Some mapping info is based in IOC Page8 data, so re-initialize the
         * mapping tables.
         */
        mpr_mapping_initialize(sc);

        /*
         * Restart will reload the event masks clobbered by the reset, and
         * then enable the port.
         */
        mpr_reregister_events(sc);

        /* The end of discovery will release the simq, so we're done. */
        mpr_dprint(sc, MPR_INIT|MPR_XINFO, "Finished sc %p post %u free %u\n",
            sc, sc->replypostindex, sc->replyfreeindex);
        mprsas_release_simq_reinit(sassc);
        mpr_dprint(sc, MPR_INIT, "%s exit error= %d\n", __func__, error);

        return 0;
}

/*
 * Wait for the chip to ACK a word that we've put into its FIFO.
 * Wait up to <timeout> seconds.  In the CAN_SLEEP case each loop iteration
 * sleeps for about 1 millisecond (1000 * <timeout> iterations); in the
 * NO_SLEEP case each iteration busy-waits for 500 microseconds
 * (2000 * <timeout> iterations).  Either way the total is
 * [0.5 * (2000 * <timeout>)] milliseconds, i.e. roughly <timeout> seconds.
 */
static int
mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag)
{
        u32 cntdn, count;
        u32 int_status;
        u32 doorbell;

        count = 0;
        cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
        do {
                int_status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
                if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
                        mpr_dprint(sc, MPR_TRACE, "%s: successful count(%d), "
                            "timeout(%d)\n", __func__, count, timeout);
                        return 0;
                } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
                        doorbell = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
                        if ((doorbell & MPI2_IOC_STATE_MASK) ==
                            MPI2_IOC_STATE_FAULT) {
                                mpr_dprint(sc, MPR_FAULT,
                                    "fault_state(0x%04x)!\n", doorbell);
                                return (EFAULT);
                        }
                } else if (int_status == 0xFFFFFFFF)
                        goto out;

                /*
                 * If it can sleep, sleep for 1 millisecond, else busy loop for
                 * 0.5 millisecond
                 */
                if (lockowned(&sc->mpr_lock) && sleep_flag == CAN_SLEEP)
                        lksleep(&sc->msleep_fake_chan, &sc->mpr_lock, 0,
                            "mprdba", hz < 1000 ? 1 : hz / 1000);
                else if (sleep_flag == CAN_SLEEP)
                        tsleep(mpr_wait_db_ack, 0, "mprdba",
                            hz < 1000 ? 1 : hz / 1000);
                else
                        DELAY(500);
                count++;
        } while (--cntdn);

out:
        mpr_dprint(sc, MPR_FAULT, "%s: failed due to timeout count(%d), "
            "int_status(%x)!\n", __func__, count, int_status);
        return (ETIMEDOUT);
}

/* Wait for the chip to signal that the next word in its FIFO can be fetched */
static int
mpr_wait_db_int(struct mpr_softc *sc)
{
        int retry;

        for (retry = 0; retry < MPR_DB_MAX_WAIT; retry++) {
                if ((mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) &
                    MPI2_HIS_IOC2SYS_DB_STATUS) != 0)
                        return (0);
                DELAY(2000);
        }
        return (ETIMEDOUT);
}

/* Step through the synchronous command state machine, i.e. "Doorbell mode" */
static int
mpr_request_sync(struct mpr_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply,
    int req_sz, int reply_sz, int timeout)
{
        uint32_t *data32;
        uint16_t *data16;
        int i, count, ioc_sz, residual;
        int sleep_flags = CAN_SLEEP;

        /* Step 1 */
        mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

        /* Step 2 */
        if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
                return (EBUSY);

        /* Step 3
         * Announce that a message is coming through the doorbell. Messages
         * are pushed as 32-bit words, so round up if needed.
         */
        count = (req_sz + 3) / 4;
        mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
            (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) |
            (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT));

        /* Step 4 */
        if (mpr_wait_db_int(sc) ||
            (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) {
                mpr_dprint(sc, MPR_FAULT, "Doorbell failed to activate\n");
                return (ENXIO);
        }
        mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
        if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
                mpr_dprint(sc, MPR_FAULT, "Doorbell handshake failed\n");
                return (ENXIO);
        }

        /* Step 5 */
        /* Clock out the message data synchronously in 32-bit dwords */
        data32 = (uint32_t *)req;
        for (i = 0; i < count; i++) {
                mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, htole32(data32[i]));
                if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
                        mpr_dprint(sc, MPR_FAULT,
                            "Timeout while writing doorbell\n");
                        return (ENXIO);
                }
        }

        /* Step 6 */
        /* Clock in the reply in 16-bit words. The total length of the
         * message is always in the 4th byte, so clock in the first 2 words
         * manually, then loop the rest.
         */
        data16 = (uint16_t *)reply;
        if (mpr_wait_db_int(sc) != 0) {
                mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 0\n");
                return (ENXIO);
        }
        data16[0] =
            mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
        mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
        if (mpr_wait_db_int(sc) != 0) {
                mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 1\n");
                return (ENXIO);
        }
        data16[1] =
            mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
        mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

        /* Number of 32bit words in the message */
        ioc_sz = reply->MsgLength;

        /*
         * Figure out how many 16bit words to clock in without overrunning.
         * The precision loss with dividing reply_sz can safely be
         * ignored because the messages can only be multiples of 32bits.
         */
        residual = 0;
        count = MIN((reply_sz / 4), ioc_sz) * 2;
        if (count < ioc_sz * 2) {
                residual = ioc_sz * 2 - count;
                mpr_dprint(sc, MPR_ERROR, "Driver error, throwing away %d "
                    "residual message words\n", residual);
        }

        for (i = 2; i < count; i++) {
                if (mpr_wait_db_int(sc) != 0) {
                        mpr_dprint(sc, MPR_FAULT,
                            "Timeout reading doorbell %d\n", i);
                        return (ENXIO);
                }
                data16[i] = mpr_regread(sc, MPI2_DOORBELL_OFFSET) &
                    MPI2_DOORBELL_DATA_MASK;
                mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
        }

        /*
         * Pull out residual words that won't fit into the provided buffer.
         * This keeps the chip from hanging due to a driver programming
         * error.
         */
        while (residual--) {
                if (mpr_wait_db_int(sc) != 0) {
                        mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell\n");
                        return (ENXIO);
                }
                (void)mpr_regread(sc, MPI2_DOORBELL_OFFSET);
                mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
        }

        /* Step 7 */
        if (mpr_wait_db_int(sc) != 0) {
                mpr_dprint(sc, MPR_FAULT, "Timeout waiting to exit doorbell\n");
                return (ENXIO);
        }
        if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
                mpr_dprint(sc, MPR_FAULT, "Warning, doorbell still active\n");
        mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

        return (0);
}

static void
mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm)
{
        request_descriptor rd;

        MPR_FUNCTRACE(sc);
        mpr_dprint(sc, MPR_TRACE, "SMID %u cm %p ccb %p\n",
            cm->cm_desc.Default.SMID, cm, cm->cm_ccb);

        if (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE && !(sc->mpr_flags &
            MPR_FLAGS_SHUTDOWN))
                KKASSERT(lockowned(&sc->mpr_lock));

        if (++sc->io_cmds_active > sc->io_cmds_highwater)
                sc->io_cmds_highwater++;

        KASSERT(cm->cm_state == MPR_CM_STATE_BUSY, ("command not busy\n"));
        cm->cm_state = MPR_CM_STATE_INQUEUE;

        if (sc->atomic_desc_capable) {
                rd.u.low = cm->cm_desc.Words.Low;
                mpr_regwrite(sc, MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET,
                    rd.u.low);
        } else {
                rd.u.low = cm->cm_desc.Words.Low;
                rd.u.high = cm->cm_desc.Words.High;
                rd.word = htole64(rd.word);
                mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET,
                    rd.u.low);
                mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET,
                    rd.u.high);
        }
}

/*
 * Just the FACTS, ma'am.
 */
static int
mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
{
        MPI2_DEFAULT_REPLY *reply;
        MPI2_IOC_FACTS_REQUEST request;
        int error, req_sz, reply_sz;

        MPR_FUNCTRACE(sc);
        mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

        req_sz = sizeof(MPI2_IOC_FACTS_REQUEST);
        reply_sz = sizeof(MPI2_IOC_FACTS_REPLY);
        reply = (MPI2_DEFAULT_REPLY *)facts;

        bzero(&request, req_sz);
        request.Function = MPI2_FUNCTION_IOC_FACTS;
        error = mpr_request_sync(sc, &request, reply, req_sz, reply_sz, 5);

        mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
        return (error);
}

static int
mpr_send_iocinit(struct mpr_softc *sc)
{
        MPI2_IOC_INIT_REQUEST init;
        MPI2_DEFAULT_REPLY reply;
        int req_sz, reply_sz, error;
        struct timeval now;
        uint64_t time_in_msec;

        MPR_FUNCTRACE(sc);
        mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

        /* Do a quick sanity check on proper initialization */
        if ((sc->pqdepth == 0) || (sc->fqdepth == 0) || (sc->reqframesz == 0)
            || (sc->replyframesz == 0)) {
                mpr_dprint(sc, MPR_INIT|MPR_ERROR,
                    "Driver not fully initialized for IOCInit\n");
                return (EINVAL);
        }

        req_sz = sizeof(MPI2_IOC_INIT_REQUEST);
        reply_sz = sizeof(MPI2_IOC_INIT_REPLY);
        bzero(&init, req_sz);
        bzero(&reply, reply_sz);

        /*
         * Fill in the init block. Note that most addresses are
         * deliberately in the lower 32bits of memory. This is a micro-
         * optimization for PCI/PCIX, though it's not clear if it helps PCIe.
         */
        init.Function = MPI2_FUNCTION_IOC_INIT;
        init.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
        init.MsgVersion = htole16(MPI2_VERSION);
        init.HeaderVersion = htole16(MPI2_HEADER_VERSION);
        init.SystemRequestFrameSize = htole16((uint16_t)(sc->reqframesz / 4));
        init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth);
        init.ReplyFreeQueueDepth = htole16(sc->fqdepth);
        init.SenseBufferAddressHigh = 0;
        init.SystemReplyAddressHigh = 0;
        init.SystemRequestFrameBaseAddress.High = 0;
        init.SystemRequestFrameBaseAddress.Low =
            htole32((uint32_t)sc->req_busaddr);
        init.ReplyDescriptorPostQueueAddress.High = 0;
        init.ReplyDescriptorPostQueueAddress.Low =
            htole32((uint32_t)sc->post_busaddr);
        init.ReplyFreeQueueAddress.High = 0;
        init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr);
        getmicrotime(&now);
        time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
        init.TimeStamp.High = htole32((time_in_msec >> 32) & 0xFFFFFFFF);
        init.TimeStamp.Low = htole32(time_in_msec & 0xFFFFFFFF);
        init.HostPageSize = HOST_PAGE_SIZE_4K;

        error = mpr_request_sync(sc, &init, &reply, req_sz, reply_sz, 5);
        if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
                error = ENXIO;

        mpr_dprint(sc, MPR_INIT, "IOCInit status= 0x%x\n", reply.IOCStatus);
        mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
        return (error);
}

void
mpr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        bus_addr_t *addr;

        addr = arg;
        *addr = segs[0].ds_addr;
}

void
mpr_memaddr_wait_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        struct mpr_busdma_context *ctx;
        int need_unload, need_free;

        ctx = (struct mpr_busdma_context *)arg;
        need_unload = 0;
        need_free = 0;

        mpr_lock(ctx->softc);
        ctx->error = error;
        ctx->completed = 1;
        if ((error == 0) && (ctx->abandoned == 0)) {
                *ctx->addr = segs[0].ds_addr;
        } else {
                if (nsegs != 0)
                        need_unload = 1;
                if (ctx->abandoned != 0)
                        need_free = 1;
        }
        if (need_free == 0)
                wakeup(ctx);

        mpr_unlock(ctx->softc);

        if (need_unload != 0) {
                bus_dmamap_unload(ctx->buffer_dmat,
                    ctx->buffer_dmamap);
                *ctx->addr = 0;
        }

        if (need_free != 0)
                kfree(ctx, M_MPR);
}

static int
mpr_alloc_queues(struct mpr_softc *sc)
{
        struct mpr_queue *q;
        int nq, i;

        nq = sc->msi_msgs;
        mpr_dprint(sc, MPR_INIT|MPR_XINFO, "Allocating %d I/O queues\n", nq);

        sc->queues = kmalloc(sizeof(struct mpr_queue) * nq, M_MPR,
            M_NOWAIT|M_ZERO);
        if (sc->queues == NULL)
                return (ENOMEM);

        for (i = 0; i < nq; i++) {
                q = &sc->queues[i];
                mpr_dprint(sc, MPR_INIT, "Configuring queue %d %p\n", i, q);
                q->sc = sc;
                q->qnum = i;
        }
        return (0);
}

static int
mpr_alloc_hw_queues(struct mpr_softc *sc)
{
        bus_addr_t queues_busaddr;
        uint8_t *queues;
        int qsize, fqsize, pqsize;

        /*
         * The reply free queue contains 4 byte entries in multiples of 16 and
         * aligned on a 16 byte boundary. There must always be an unused entry.
         * This queue supplies fresh reply frames for the firmware to use.
         *
         * The reply descriptor post queue contains 8 byte entries in
         * multiples of 16 and aligned on a 16 byte boundary. This queue
         * contains filled-in reply frames sent from the firmware to the host.
         *
         * These two queues are allocated together for simplicity.
         */
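        /*
         * Sizing sketch (illustrative numbers): if num_replies were 1023,
         * both depths round up to roundup2(1024, 16) = 1024, giving a 4KB
         * free queue (1024 * 4) and an 8KB post queue (1024 * 8) in a
         * single 12KB DMA allocation.
         */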
        sc->fqdepth = roundup2(sc->num_replies + 1, 16);
        sc->pqdepth = roundup2(sc->num_replies + 1, 16);
        fqsize = sc->fqdepth * 4;
        pqsize = sc->pqdepth * 8;
        qsize = fqsize + pqsize;

        if (bus_dma_tag_create( sc->mpr_parent_dmat,    /* parent */
                                16, 0,                  /* algnmnt, boundary */
                                BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                                BUS_SPACE_MAXADDR,      /* highaddr */
                                qsize,                  /* maxsize */
                                1,                      /* nsegments */
                                qsize,                  /* maxsegsize */
                                0,                      /* flags */
                                &sc->queues_dmat)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues DMA tag\n");
                return (ENOMEM);
        }
        if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT,
            &sc->queues_map)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues memory\n");
                return (ENOMEM);
        }
        bzero(queues, qsize);
        bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize,
            mpr_memaddr_cb, &queues_busaddr, 0);

        sc->free_queue = (uint32_t *)queues;
        sc->free_busaddr = queues_busaddr;
        sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize);
        sc->post_busaddr = queues_busaddr + fqsize;
        mpr_dprint(sc, MPR_INIT, "free queue busaddr= %#016jx size= %d\n",
            (uintmax_t)sc->free_busaddr, fqsize);
        mpr_dprint(sc, MPR_INIT, "reply queue busaddr= %#016jx size= %d\n",
            (uintmax_t)sc->post_busaddr, pqsize);

        return (0);
}

static int
mpr_alloc_replies(struct mpr_softc *sc)
{
        int rsize, num_replies;

        /* Store the reply frame size in bytes rather than as 32bit words */
        sc->replyframesz = sc->facts->ReplyFrameSize * 4;

        /*
         * sc->num_replies should be one less than sc->fqdepth. We need to
         * allocate space for sc->fqdepth replies, but only sc->num_replies
         * replies can be used at once.
         */
        num_replies = max(sc->fqdepth, sc->num_replies);

        rsize = sc->replyframesz * num_replies;
        if (bus_dma_tag_create( sc->mpr_parent_dmat,    /* parent */
                                4, 0,                   /* algnmnt, boundary */
                                BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                                BUS_SPACE_MAXADDR,      /* highaddr */
                                rsize,                  /* maxsize */
                                1,                      /* nsegments */
                                rsize,                  /* maxsegsize */
                                0,                      /* flags */
                                &sc->reply_dmat)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies DMA tag\n");
                return (ENOMEM);
        }
        if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames,
            BUS_DMA_NOWAIT, &sc->reply_map)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies memory\n");
                return (ENOMEM);
        }
        bzero(sc->reply_frames, rsize);
        bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize,
            mpr_memaddr_cb, &sc->reply_busaddr, 0);
        mpr_dprint(sc, MPR_INIT, "reply frames busaddr= %#016jx size= %d\n",
            (uintmax_t)sc->reply_busaddr, rsize);

        return (0);
}

static void
mpr_load_chains_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        struct mpr_softc *sc = arg;
        struct mpr_chain *chain;
        bus_size_t bo;
        int i, o, s;

        if (error != 0)
                return;

        for (i = 0, o = 0, s = 0; s < nsegs; s++) {
                for (bo = 0; bo + sc->chain_frame_size <= segs[s].ds_len;
                    bo += sc->chain_frame_size) {
                        chain = &sc->chains[i++];
                        chain->chain =
                            (MPI2_SGE_IO_UNION *)(sc->chain_frames + o);
                        chain->chain_busaddr = segs[s].ds_addr + bo;
                        o += sc->chain_frame_size;
                        mpr_free_chain(sc, chain);
                }
                if (bo != segs[s].ds_len)
                        o += segs[s].ds_len - bo;
        }
        sc->chain_free_lowwater = i;
}

static int
mpr_alloc_requests(struct mpr_softc *sc)
{
        struct mpr_command *cm;
        int i, rsize, nsegs;

        rsize = sc->reqframesz * sc->num_reqs;
        if (bus_dma_tag_create( sc->mpr_parent_dmat,    /* parent */
                                16, 0,                  /* algnmnt, boundary */
                                BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                                BUS_SPACE_MAXADDR,      /* highaddr */
                                rsize,                  /* maxsize */
                                1,                      /* nsegments */
                                rsize,                  /* maxsegsize */
                                0,                      /* flags */
                                &sc->req_dmat)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate request DMA tag\n");
                return (ENOMEM);
        }
        if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames,
            BUS_DMA_NOWAIT, &sc->req_map)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate request memory\n");
                return (ENOMEM);
        }
        bzero(sc->req_frames, rsize);
        bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize,
            mpr_memaddr_cb, &sc->req_busaddr, 0);
        mpr_dprint(sc, MPR_INIT, "request frames busaddr= %#016jx size= %d\n",
            (uintmax_t)sc->req_busaddr, rsize);

        sc->chains = kmalloc(sizeof(struct mpr_chain) * sc->num_chains, M_MPR,
            M_NOWAIT | M_ZERO);
        if (!sc->chains) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain memory\n");
                return (ENOMEM);
        }
        rsize = sc->chain_frame_size * sc->num_chains;
        if (bus_dma_tag_create( sc->mpr_parent_dmat,    /* parent */
                                16, 0,                  /* algnmnt, boundary */
                                BUS_SPACE_MAXADDR,      /* lowaddr */
                                BUS_SPACE_MAXADDR,      /* highaddr */
                                rsize,                  /* maxsize */
                                howmany(rsize, PAGE_SIZE), /* nsegments */
                                rsize,                  /* maxsegsize */
                                0,                      /* flags */
                                &sc->chain_dmat)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain DMA tag\n");
                return (ENOMEM);
        }
        if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->chain_map)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain memory\n");
                return (ENOMEM);
        }
        if (bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames,
            rsize, mpr_load_chains_cb, sc, BUS_DMA_NOWAIT)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot load chain memory\n");
                bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
                    sc->chain_map);
                return (ENOMEM);
        }

        rsize = MPR_SENSE_LEN * sc->num_reqs;
        if (bus_dma_tag_create( sc->mpr_parent_dmat,    /* parent */
                                1, 0,                   /* algnmnt, boundary */
                                BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                                BUS_SPACE_MAXADDR,      /* highaddr */
                                rsize,                  /* maxsize */
                                1,                      /* nsegments */
                                rsize,                  /* maxsegsize */
                                0,                      /* flags */
                                &sc->sense_dmat)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense DMA tag\n");
                return (ENOMEM);
        }
        if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames,
            BUS_DMA_NOWAIT, &sc->sense_map)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense memory\n");
                return (ENOMEM);
        }
        bzero(sc->sense_frames, rsize);
        bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize,
            mpr_memaddr_cb, &sc->sense_busaddr, 0);
        mpr_dprint(sc, MPR_INIT, "sense frames busaddr= %#016jx size= %d\n",
            (uintmax_t)sc->sense_busaddr, rsize);

        /*
         * Allocate NVMe PRP Pages for NVMe SGL support only if the FW supports
         * these devices.
         */
        if ((sc->facts->MsgVersion >= MPI2_VERSION_02_06) &&
            (sc->facts->ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES)) {
                if (mpr_alloc_nvme_prp_pages(sc) == ENOMEM)
                        return (ENOMEM);
        }

        nsegs = (sc->maxio / PAGE_SIZE) + 1;
        if (bus_dma_tag_create( sc->mpr_parent_dmat,    /* parent */
                                1, 0,                   /* algnmnt, boundary */
                                BUS_SPACE_MAXADDR,      /* lowaddr */
                                BUS_SPACE_MAXADDR,      /* highaddr */
                                BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
                                nsegs,                  /* nsegments */
                                BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
                                BUS_DMA_ALLOCNOW,       /* flags */
                                &sc->buffer_dmat)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate buffer DMA tag\n");
                return (ENOMEM);
        }

        /*
         * SMID 0 cannot be used as a free command per the firmware spec.
         * Just drop that command instead of risking accounting bugs.
         */
        sc->commands = kmalloc(sizeof(struct mpr_command) * sc->num_reqs,
            M_MPR, M_WAITOK | M_ZERO);
        if (!sc->commands) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate command memory\n");
                return (ENOMEM);
        }
        for (i = 1; i < sc->num_reqs; i++) {
                cm = &sc->commands[i];
                cm->cm_req = sc->req_frames + i * sc->reqframesz;
                cm->cm_req_busaddr = sc->req_busaddr + i * sc->reqframesz;
                cm->cm_sense = &sc->sense_frames[i];
                cm->cm_sense_busaddr = sc->sense_busaddr + i * MPR_SENSE_LEN;
                cm->cm_desc.Default.SMID = i;
                cm->cm_sc = sc;
                cm->cm_state = MPR_CM_STATE_BUSY;
                TAILQ_INIT(&cm->cm_chain_list);
                TAILQ_INIT(&cm->cm_prp_page_list);
                callout_init_lk(&cm->cm_callout, &sc->mpr_lock);

                /* XXX Is a failure here a critical problem? */
                if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap)
                    == 0) {
                        if (i <= sc->num_prireqs)
                                mpr_free_high_priority_command(sc, cm);
                        else
                                mpr_free_command(sc, cm);
                } else {
                        panic("failed to allocate command %d\n", i);
                        sc->num_reqs = i;
                        break;
                }
        }

        return (0);
}

/*
 * Allocate contiguous buffers for PCIe NVMe devices for building native PRPs,
 * which are scatter/gather lists for NVMe devices.
 *
 * This buffer must be contiguous due to the nature of how NVMe PRPs are built
 * and translated by FW.
 *
 * Returns ENOMEM if memory could not be allocated, otherwise returns 0.
 */
static int
mpr_alloc_nvme_prp_pages(struct mpr_softc *sc)
{
        int PRPs_per_page, PRPs_required, pages_required;
        int rsize, i;
        struct mpr_prp_page *prp_page;

        /*
         * Assuming a MAX_IO_SIZE of 1MB and a PAGE_SIZE of 4k, the max number
         * of PRPs (NVMe's Scatter/Gather Element) needed per I/O is:
         * MAX_IO_SIZE / PAGE_SIZE = 256
         *
         * 1 PRP entry in main frame for PRP list pointer still leaves 255 PRPs
         * required for the remainder of the 1MB I/O. 512 PRPs can fit into one
         * page (4096 / 8 = 512), so only one page is required for each I/O.
         *
         * Each of these buffers will need to be contiguous. For simplicity,
         * only one buffer is allocated here, which has all of the space
         * required for the NVMe Queue Depth. If there are problems allocating
         * this one buffer, this function will need to change to allocate
         * individual, contiguous NVME_QDEPTH buffers.
         *
         * The real calculation will use the real max io size. Above is just an
         * example.
         *
         */
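        /*
         * Concrete numbers for the example above: with a 1MB maxio and 4k
         * pages, PRPs_required = 256 and PRPs_per_page = 4096 / 8 - 1 = 511
         * (the extra entry is presumably reserved for chaining to a further
         * page), so pages_required = 1 and prp_buffer_size is one 4k page
         * per command.
         */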
        PRPs_required = sc->maxio / PAGE_SIZE;
        PRPs_per_page = (PAGE_SIZE / PRP_ENTRY_SIZE) - 1;
        pages_required = (PRPs_required / PRPs_per_page) + 1;

        sc->prp_buffer_size = PAGE_SIZE * pages_required;
        rsize = sc->prp_buffer_size * NVME_QDEPTH;
        if (bus_dma_tag_create( sc->mpr_parent_dmat,    /* parent */
                                4, 0,                   /* algnmnt, boundary */
                                BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                                BUS_SPACE_MAXADDR,      /* highaddr */
                                rsize,                  /* maxsize */
                                1,                      /* nsegments */
                                rsize,                  /* maxsegsize */
                                0,                      /* flags */
                                &sc->prp_page_dmat)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP DMA "
                    "tag\n");
                return (ENOMEM);
        }
        if (bus_dmamem_alloc(sc->prp_page_dmat, (void **)&sc->prp_pages,
            BUS_DMA_NOWAIT, &sc->prp_page_map)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP memory\n");
                return (ENOMEM);
        }
        bzero(sc->prp_pages, rsize);
        bus_dmamap_load(sc->prp_page_dmat, sc->prp_page_map, sc->prp_pages,
            rsize, mpr_memaddr_cb, &sc->prp_page_busaddr, 0);

        sc->prps = kmalloc(sizeof(struct mpr_prp_page) * NVME_QDEPTH, M_MPR,
            M_WAITOK | M_ZERO);
        for (i = 0; i < NVME_QDEPTH; i++) {
                prp_page = &sc->prps[i];
                prp_page->prp_page = (uint64_t *)(sc->prp_pages +
                    i * sc->prp_buffer_size);
                prp_page->prp_page_busaddr = (uint64_t)(sc->prp_page_busaddr +
                    i * sc->prp_buffer_size);
                mpr_free_prp_page(sc, prp_page);
                sc->prp_pages_free_lowwater++;
        }

        return (0);
}
1635
1636 static int
mpr_init_queues(struct mpr_softc * sc)1637 mpr_init_queues(struct mpr_softc *sc)
1638 {
1639 int i;
1640
1641 memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8);
1642
1643 /*
1644 * According to the spec, we need to use one less reply than we
1645 * have space for on the queue. So sc->num_replies (the number we
1646 * use) should be less than sc->fqdepth (allocated size).
1647 */
1648 if (sc->num_replies >= sc->fqdepth)
1649 return (EINVAL);
1650
1651 /*
1652 * Initialize all of the free queue entries.
1653 */
1654 for (i = 0; i < sc->fqdepth; i++) {
1655 sc->free_queue[i] = sc->reply_busaddr + (i * sc->replyframesz);
1656 }
1657 sc->replyfreeindex = sc->num_replies;
1658
1659 return (0);
1660 }
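/*
 * Example of the one-less-than-full rule above (hypothetical sizes): if
 * fqdepth = 1024 reply frames were allocated, at most num_replies = 1023
 * may be handed to the IOC, and replyfreeindex starts at num_replies so
 * that one free-queue slot always remains unused.
 */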
1661
1662 /* Get the driver parameter tunables. Lowest priority are the driver defaults.
1663 * Next are the global settings, if they exist. Highest are the per-unit
1664 * settings, if they exist.
1665 */
1666 void
1667 mpr_get_tunables(struct mpr_softc *sc)
1668 {
1669 char tmpstr[80];
1670
1671 /* XXX default to some debugging for now */
1672 sc->mpr_debug = MPR_INFO | MPR_FAULT;
1673 sc->msi_enable = 1;
1674 sc->max_chains = MPR_CHAIN_FRAMES;
1675 sc->max_io_pages = MPR_MAXIO_PAGES;
1676 sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD;
1677 sc->spinup_wait_time = DEFAULT_SPINUP_WAIT;
1678 sc->use_phynum = 1;
1679 sc->max_reqframes = MPR_REQ_FRAMES;
1680 sc->max_prireqframes = MPR_PRI_REQ_FRAMES;
1681 sc->max_replyframes = MPR_REPLY_FRAMES;
1682 sc->max_evtframes = MPR_EVT_REPLY_FRAMES;
1683
1684 /*
1685 * Grab the global variables.
1686 */
1687 TUNABLE_INT_FETCH("hw.mpr.debug_level", &sc->mpr_debug);
1688 TUNABLE_INT_FETCH("hw.mpr.msi_enable", &sc->msi_enable);
1689 TUNABLE_INT_FETCH("hw.mpr.max_chains", &sc->max_chains);
1690 TUNABLE_INT_FETCH("hw.mpr.max_io_pages", &sc->max_io_pages);
1691 TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu);
1692 TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time);
1693 TUNABLE_INT_FETCH("hw.mpr.use_phy_num", &sc->use_phynum);
1694 TUNABLE_INT_FETCH("hw.mpr.max_reqframes", &sc->max_reqframes);
1695 TUNABLE_INT_FETCH("hw.mpr.max_prireqframes", &sc->max_prireqframes);
1696 TUNABLE_INT_FETCH("hw.mpr.max_replyframes", &sc->max_replyframes);
1697 TUNABLE_INT_FETCH("hw.mpr.max_evtframes", &sc->max_evtframes);
1698
1699 /* Grab the unit-instance variables */
1700 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.debug_level",
1701 device_get_unit(sc->mpr_dev));
1702 TUNABLE_INT_FETCH(tmpstr, &sc->mpr_debug);
1703
1704 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.msi_enable",
1705 device_get_unit(sc->mpr_dev));
1706 TUNABLE_INT_FETCH(tmpstr, &sc->msi_enable);
1707
1708 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_chains",
1709 device_get_unit(sc->mpr_dev));
1710 TUNABLE_INT_FETCH(tmpstr, &sc->max_chains);
1711
1712 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_io_pages",
1713 device_get_unit(sc->mpr_dev));
1714 TUNABLE_INT_FETCH(tmpstr, &sc->max_io_pages);
1715
1716 bzero(sc->exclude_ids, sizeof(sc->exclude_ids));
1717 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.exclude_ids",
1718 device_get_unit(sc->mpr_dev));
1719 TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids));
1720
1721 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.enable_ssu",
1722 device_get_unit(sc->mpr_dev));
1723 TUNABLE_INT_FETCH(tmpstr, &sc->enable_ssu);
1724
1725 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.spinup_wait_time",
1726 device_get_unit(sc->mpr_dev));
1727 TUNABLE_INT_FETCH(tmpstr, &sc->spinup_wait_time);
1728
1729 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.use_phy_num",
1730 device_get_unit(sc->mpr_dev));
1731 TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum);
1732
1733 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_reqframes",
1734 device_get_unit(sc->mpr_dev));
1735 TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes);
1736
1737 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_prireqframes",
1738 device_get_unit(sc->mpr_dev));
1739 TUNABLE_INT_FETCH(tmpstr, &sc->max_prireqframes);
1740
1741 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_replyframes",
1742 device_get_unit(sc->mpr_dev));
1743 TUNABLE_INT_FETCH(tmpstr, &sc->max_replyframes);
1744
1745 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_evtframes",
1746 device_get_unit(sc->mpr_dev));
1747 TUNABLE_INT_FETCH(tmpstr, &sc->max_evtframes);
1748 }
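/*
 * Illustrative /boot/loader.conf fragment showing the priority described
 * above: the global knob applies to all adapters, and the per-unit knob,
 * fetched last, overrides it for unit 0 only:
 *
 *   hw.mpr.max_chains="4096"
 *   dev.mpr.0.max_chains="8192"
 */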
1749
1750 static void
1751 mpr_setup_sysctl(struct mpr_softc *sc)
1752 {
1753 struct sysctl_ctx_list *sysctl_ctx = NULL;
1754 struct sysctl_oid *sysctl_tree = NULL;
1755 char tmpstr[80], tmpstr2[80];
1756
1757 /*
1758 * Setup the sysctl variable so the user can change the debug level
1759 * on the fly.
1760 */
1761 ksnprintf(tmpstr, sizeof(tmpstr), "MPR controller %d",
1762 device_get_unit(sc->mpr_dev));
1763 ksnprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mpr_dev));
1764
1765 sysctl_ctx = device_get_sysctl_ctx(sc->mpr_dev);
1766 if (sysctl_ctx != NULL)
1767 sysctl_tree = device_get_sysctl_tree(sc->mpr_dev);
1768
1769 if (sysctl_tree == NULL) {
1770 sysctl_ctx_init(&sc->sysctl_ctx);
1771 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
1772 SYSCTL_STATIC_CHILDREN(_hw_mpr), OID_AUTO, tmpstr2,
1773 CTLFLAG_RD, 0, tmpstr);
1774 if (sc->sysctl_tree == NULL)
1775 return;
1776 sysctl_ctx = &sc->sysctl_ctx;
1777 sysctl_tree = sc->sysctl_tree;
1778 }
1779
1780 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1781 OID_AUTO, "debug_level", CTLFLAG_RW, &sc->mpr_debug, 0,
1782 "mpr debug level");
1783
1784 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1785 OID_AUTO, "max_reqframes", CTLFLAG_RD, &sc->max_reqframes, 0,
1786 "Total number of allocated request frames");
1787
1788 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1789 OID_AUTO, "max_prireqframes", CTLFLAG_RD, &sc->max_prireqframes, 0,
1790 "Total number of allocated high priority request frames");
1791
1792 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1793 OID_AUTO, "max_replyframes", CTLFLAG_RD, &sc->max_replyframes, 0,
1794 "Total number of allocated reply frames");
1795
1796 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1797 OID_AUTO, "max_evtframes", CTLFLAG_RD, &sc->max_evtframes, 0,
1798 "Total number of event frames allocated");
1799
1800 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1801 OID_AUTO, "firmware_version", CTLFLAG_RW, sc->fw_version,
1802 strlen(sc->fw_version), "firmware version");
1803
1804 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1805 OID_AUTO, "driver_version", CTLFLAG_RW, MPR_DRIVER_VERSION,
1806 strlen(MPR_DRIVER_VERSION), "driver version");
1807
1808 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1809 OID_AUTO, "io_cmds_active", CTLFLAG_RD,
1810 &sc->io_cmds_active, 0, "number of currently active commands");
1811
1812 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1813 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
1814 &sc->io_cmds_highwater, 0, "maximum active commands seen");
1815
1816 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1817 OID_AUTO, "chain_free", CTLFLAG_RD,
1818 &sc->chain_free, 0, "number of free chain elements");
1819
1820 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1821 OID_AUTO, "chain_free_lowwater", CTLFLAG_RD,
1822 	    &sc->chain_free_lowwater, 0, "lowest number of free chain elements");
1823
1824 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1825 OID_AUTO, "max_chains", CTLFLAG_RD,
1826 	    &sc->max_chains, 0, "maximum chain frames that will be allocated");
1827
1828 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1829 OID_AUTO, "max_io_pages", CTLFLAG_RD,
1830 	    &sc->max_io_pages, 0, "maximum pages to allow per I/O (if <1 use "
1831 "IOCFacts)");
1832
1833 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1834 OID_AUTO, "enable_ssu", CTLFLAG_RW, &sc->enable_ssu, 0,
1835 "enable SSU to SATA SSD/HDD at shutdown");
1836
1837 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1838 OID_AUTO, "chain_alloc_fail", CTLFLAG_RD,
1839 &sc->chain_alloc_fail, 0, "chain allocation failures");
1840
1841 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1842 OID_AUTO, "spinup_wait_time", CTLFLAG_RD,
1843 &sc->spinup_wait_time, DEFAULT_SPINUP_WAIT, "seconds to wait for "
1844 "spinup after SATA ID error");
1845
1846 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1847 OID_AUTO, "dump_reqs", CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_SKIP, sc, 0,
1848 mpr_dump_reqs, "I", "Dump Active Requests");
1849
1850 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1851 OID_AUTO, "use_phy_num", CTLFLAG_RD, &sc->use_phynum, 0,
1852 "Use the phy number for enumeration");
1853
1854 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1855 OID_AUTO, "prp_pages_free", CTLFLAG_RD,
1856 &sc->prp_pages_free, 0, "number of free PRP pages");
1857
1858 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1859 OID_AUTO, "prp_pages_free_lowwater", CTLFLAG_RD,
1860 	    &sc->prp_pages_free_lowwater, 0, "lowest number of free PRP pages");
1861
1862 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1863 OID_AUTO, "prp_page_alloc_fail", CTLFLAG_RD,
1864 &sc->prp_page_alloc_fail, 0, "PRP page allocation failures");
1865 }
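/*
 * Illustrative usage: the CTLFLAG_RW node added above can be changed at
 * runtime, e.g. "sysctl dev.mpr.0.debug_level=3", while the CTLFLAG_RD
 * nodes (max_chains, chain_free, io_cmds_highwater, ...) only report
 * driver state.
 */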
1866
1867 struct mpr_dumpreq_hdr {
1868 uint32_t smid;
1869 uint32_t state;
1870 uint32_t numframes;
1871 uint32_t deschi;
1872 uint32_t desclo;
1873 };
1874
1875 static int
1876 mpr_dump_reqs(SYSCTL_HANDLER_ARGS)
1877 {
1878 struct mpr_softc *sc;
1879 struct mpr_chain *chain, *chain1;
1880 struct mpr_command *cm;
1881 struct mpr_dumpreq_hdr hdr;
1882 struct sbuf *sb;
1883 uint32_t smid, state;
1884 int i, numreqs, error = 0;
1885
1886 sc = (struct mpr_softc *)arg1;
1887
1888 if ((error = caps_priv_check_self(SYSCAP_NODRIVER)) != 0)
1889 return (error);
1890
1891 state = MPR_CM_STATE_INQUEUE;
1892 smid = 1;
1893 numreqs = sc->num_reqs;
1894
1895 if (req->newptr != NULL)
1896 return (EINVAL);
1897
1898 if (smid == 0 || smid > sc->num_reqs)
1899 return (EINVAL);
1900 if (numreqs <= 0 || (numreqs + smid > sc->num_reqs))
1901 numreqs = sc->num_reqs;
1902 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
1903
1904 /* Best effort, no locking */
1905 for (i = smid; i < numreqs; i++) {
1906 cm = &sc->commands[i];
1907 if (cm->cm_state != state)
1908 continue;
1909 hdr.smid = i;
1910 hdr.state = cm->cm_state;
1911 hdr.numframes = 1;
1912 hdr.deschi = cm->cm_desc.Words.High;
1913 hdr.desclo = cm->cm_desc.Words.Low;
1914 TAILQ_FOREACH_MUTABLE(chain, &cm->cm_chain_list, chain_link,
1915 chain1)
1916 hdr.numframes++;
1917 sbuf_bcat(sb, &hdr, sizeof(hdr));
1918 sbuf_bcat(sb, cm->cm_req, 128);
1919 TAILQ_FOREACH_MUTABLE(chain, &cm->cm_chain_list, chain_link,
1920 chain1)
1921 sbuf_bcat(sb, chain->chain, 128);
1922 }
1923
1924 error = sbuf_finish(sb);
1925 sbuf_delete(sb);
1926 return (error);
1927 }
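/*
 * A minimal userland sketch (example only, not compiled; assumes the
 * opaque buffer was fetched with sysctlbyname("dev.mpr.0.dump_reqs"))
 * showing how to walk the stream emitted above: each record is one
 * mpr_dumpreq_hdr followed by hdr.numframes frames of 128 bytes (the
 * request frame, then one frame per chain element).
 */
#if 0
static void
parse_dump_reqs(const uint8_t *buf, size_t len)
{
	struct mpr_dumpreq_hdr hdr;
	size_t off = 0;

	while (off + sizeof(hdr) <= len) {
		memcpy(&hdr, buf + off, sizeof(hdr));
		off += sizeof(hdr);
		printf("smid %u state %u frames %u desc %08x%08x\n",
		    hdr.smid, hdr.state, hdr.numframes,
		    hdr.deschi, hdr.desclo);
		off += (size_t)hdr.numframes * 128;	/* skip frame data */
	}
}
#endif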
1928
1929 int
1930 mpr_attach(struct mpr_softc *sc)
1931 {
1932 int error;
1933
1934 MPR_FUNCTRACE(sc);
1935 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
1936
1937 lockinit(&sc->mpr_lock, "MPR lock", 0, LK_CANRECURSE);
1938 callout_init_lk(&sc->periodic, &sc->mpr_lock);
1939 callout_init_lk(&sc->device_check_callout, &sc->mpr_lock);
1940 TAILQ_INIT(&sc->event_list);
1941 timevalclear(&sc->lastfail);
1942
1943 if ((error = mpr_transition_ready(sc)) != 0) {
1944 mpr_dprint(sc, MPR_INIT|MPR_FAULT,
1945 "Failed to transition ready\n");
1946 return (error);
1947 }
1948
1949 sc->facts = kmalloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPR,
1950 M_ZERO|M_NOWAIT);
1951 if (!sc->facts) {
1952 mpr_dprint(sc, MPR_INIT|MPR_FAULT,
1953 "Cannot allocate memory, exit\n");
1954 return (ENOMEM);
1955 }
1956
1957 /*
1958 * Get IOC Facts and allocate all structures based on this information.
1959 * A Diag Reset will also call mpr_iocfacts_allocate and re-read the IOC
1960 * Facts. If relevant values have changed in IOC Facts, this function
1961 * will free all of the memory based on IOC Facts and reallocate that
1962 * memory. If this fails, any allocated memory should already be freed.
1963 */
1964 if ((error = mpr_iocfacts_allocate(sc, TRUE)) != 0) {
1965 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC Facts allocation "
1966 "failed with error %d\n", error);
1967 return (error);
1968 }
1969
1970 /* Start the periodic watchdog check on the IOC Doorbell */
1971 mpr_periodic(sc);
1972
1973 /*
1974 * The portenable will kick off discovery events that will drive the
1975 * rest of the initialization process. The CAM/SAS module will
1976 * hold up the boot sequence until discovery is complete.
1977 */
1978 sc->mpr_ich.ich_func = mpr_startup;
1979 sc->mpr_ich.ich_arg = sc;
1980 if (config_intrhook_establish(&sc->mpr_ich) != 0) {
1981 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
1982 "Cannot establish MPR config hook\n");
1983 error = EINVAL;
1984 }
1985
1986 /*
1987 * Allow IR to shutdown gracefully when shutdown occurs.
1988 */
1989 sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
1990 mprsas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT);
1991
1992 if (sc->shutdown_eh == NULL)
1993 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
1994 "shutdown event registration failed\n");
1995
1996 mpr_setup_sysctl(sc);
1997
1998 sc->mpr_flags |= MPR_FLAGS_ATTACH_DONE;
1999 mpr_dprint(sc, MPR_INIT, "%s exit error= %d\n", __func__, error);
2000
2001 return (error);
2002 }
2003
2004 /* Run through any late-start handlers. */
2005 static void
2006 mpr_startup(void *arg)
2007 {
2008 struct mpr_softc *sc;
2009
2010 sc = (struct mpr_softc *)arg;
2011 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
2012
2013 mpr_lock(sc);
2014 mpr_unmask_intr(sc);
2015
2016 /* initialize device mapping tables */
2017 mpr_base_static_config_pages(sc);
2018 mpr_mapping_initialize(sc);
2019 mprsas_startup(sc);
2020 mpr_unlock(sc);
2021
2022 mpr_dprint(sc, MPR_INIT, "disestablish config intrhook\n");
2023 config_intrhook_disestablish(&sc->mpr_ich);
2024 sc->mpr_ich.ich_arg = NULL;
2025
2026 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
2027 }
2028
2029 /* Periodic watchdog. Called with the driver lock already held. */
2030 static void
2031 mpr_periodic(void *arg)
2032 {
2033 struct mpr_softc *sc;
2034 uint32_t db;
2035
2036 sc = (struct mpr_softc *)arg;
2037 if (sc->mpr_flags & MPR_FLAGS_SHUTDOWN)
2038 return;
2039
2040 db = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
2041 if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
2042 if ((db & MPI2_DOORBELL_FAULT_CODE_MASK) ==
2043 		    MPI2_IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED) {
2044 panic("TEMPERATURE FAULT: STOPPING.");
2045 }
2046 mpr_dprint(sc, MPR_FAULT, "IOC Fault 0x%08x, Resetting\n", db);
2047 mpr_reinit(sc);
2048 }
2049
2050 callout_reset(&sc->periodic, MPR_PERIODIC_DELAY * hz, mpr_periodic, sc);
2051 }
2052
2053 static void
2054 mpr_log_evt_handler(struct mpr_softc *sc, uintptr_t data,
2055 MPI2_EVENT_NOTIFICATION_REPLY *event)
2056 {
2057 MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry;
2058
2059 MPR_DPRINT_EVENT(sc, generic, event);
2060
2061 switch (event->Event) {
2062 case MPI2_EVENT_LOG_DATA:
2063 mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_DATA:\n");
2064 if (sc->mpr_debug & MPR_EVENT)
2065 hexdump(event->EventData, event->EventDataLength, NULL,
2066 0);
2067 break;
2068 case MPI2_EVENT_LOG_ENTRY_ADDED:
2069 entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData;
2070 mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_ENTRY_ADDED event "
2071 "0x%x Sequence %d:\n", entry->LogEntryQualifier,
2072 entry->LogSequence);
2073 break;
2074 default:
2075 break;
2076 }
2077 return;
2078 }
2079
2080 static int
2081 mpr_attach_log(struct mpr_softc *sc)
2082 {
2083 uint8_t events[16];
2084
2085 bzero(events, 16);
2086 setbit(events, MPI2_EVENT_LOG_DATA);
2087 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
2088
2089 mpr_register_events(sc, events, mpr_log_evt_handler, NULL,
2090 &sc->mpr_log_eh);
2091
2092 return (0);
2093 }
2094
2095 static int
2096 mpr_detach_log(struct mpr_softc *sc)
2097 {
2098
2099 if (sc->mpr_log_eh != NULL)
2100 mpr_deregister_events(sc, sc->mpr_log_eh);
2101 return (0);
2102 }
2103
2104 /*
2105 * Free all of the driver resources and detach submodules. Should be called
2106 * without the lock held.
2107 */
2108 int
2109 mpr_free(struct mpr_softc *sc)
2110 {
2111 int error;
2112
2113 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
2114 /* Turn off the watchdog */
2115 mpr_lock(sc);
2116 sc->mpr_flags |= MPR_FLAGS_SHUTDOWN;
2117 mpr_unlock(sc);
2118 /* Lock must not be held for this */
2119 callout_drain(&sc->periodic);
2120 callout_drain(&sc->device_check_callout);
2121
2122 if (((error = mpr_detach_log(sc)) != 0) ||
2123 ((error = mpr_detach_sas(sc)) != 0)) {
2124 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "failed to detach "
2125 "subsystems, error= %d, exit\n", error);
2126 return (error);
2127 }
2128
2129 mpr_detach_user(sc);
2130
2131 /* Put the IOC back in the READY state. */
2132 mpr_lock(sc);
2133 if ((error = mpr_transition_ready(sc)) != 0) {
2134 mpr_unlock(sc);
2135 return (error);
2136 }
2137 mpr_unlock(sc);
2138
2139 if (sc->facts != NULL)
2140 kfree(sc->facts, M_MPR);
2141
2142 /*
2143 * Free all buffers that are based on IOC Facts. A Diag Reset may need
2144 * to free these buffers too.
2145 */
2146 mpr_iocfacts_free(sc);
2147
2148 if (sc->sysctl_tree != NULL)
2149 sysctl_ctx_free(&sc->sysctl_ctx);
2150
2151 /* Deregister the shutdown function */
2152 if (sc->shutdown_eh != NULL)
2153 EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh);
2154
2155 lockuninit(&sc->mpr_lock);
2156 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
2157
2158 return (0);
2159 }
2160
2161 static __inline void
2162 mpr_complete_command(struct mpr_softc *sc, struct mpr_command *cm)
2163 {
2164 MPR_FUNCTRACE(sc);
2165
2166 if (cm == NULL) {
2167 mpr_dprint(sc, MPR_ERROR, "Completing NULL command\n");
2168 return;
2169 }
2170
2171 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
2172 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
2173
2174 if (cm->cm_complete != NULL) {
2175 mpr_dprint(sc, MPR_TRACE,
2176 "%s cm %p calling cm_complete %p data %p reply %p\n",
2177 __func__, cm, cm->cm_complete, cm->cm_complete_data,
2178 cm->cm_reply);
2179 cm->cm_complete(sc, cm);
2180 }
2181
2182 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
2183 mpr_dprint(sc, MPR_TRACE, "waking up %p\n", cm);
2184 wakeup(cm);
2185 }
2186
2187 if (sc->io_cmds_active != 0) {
2188 sc->io_cmds_active--;
2189 } else {
2190 mpr_dprint(sc, MPR_ERROR, "Warning: io_cmds_active is "
2191 "out of sync - resynching to 0\n");
2192 }
2193 }
2194
2195 static void
2196 mpr_sas_log_info(struct mpr_softc *sc, u32 log_info)
2197 {
2198 union loginfo_type {
2199 u32 loginfo;
2200 struct {
2201 u32 subcode:16;
2202 u32 code:8;
2203 u32 originator:4;
2204 u32 bus_type:4;
2205 } dw;
2206 };
2207 union loginfo_type sas_loginfo;
2208 char *originator_str = NULL;
2209
2210 sas_loginfo.loginfo = log_info;
2211 if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
2212 return;
2213
2214 /* each nexus loss loginfo */
2215 if (log_info == 0x31170000)
2216 return;
2217
2218 /* eat the loginfos associated with task aborts */
2219 	if ((log_info == 0x30050000) || (log_info == 0x31140000) ||
2220 (log_info == 0x31130000))
2221 return;
2222
2223 switch (sas_loginfo.dw.originator) {
2224 case 0:
2225 originator_str = "IOP";
2226 break;
2227 case 1:
2228 originator_str = "PL";
2229 break;
2230 case 2:
2231 originator_str = "IR";
2232 break;
2233 }
2234
2235 mpr_dprint(sc, MPR_LOG, "log_info(0x%08x): originator(%s), "
2236 "code(0x%02x), sub_code(0x%04x)\n", log_info, originator_str,
2237 sas_loginfo.dw.code, sas_loginfo.dw.subcode);
2238 }
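/*
 * Worked decode example (illustrative value): log_info 0x31120100 splits
 * per the bitfields above into bus_type 0x3 (SAS), originator 0x1 ("PL"),
 * code 0x12 and sub_code 0x0100, and would be printed since it is not one
 * of the squelched loginfo values.
 */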
2239
2240 static void
2241 mpr_display_reply_info(struct mpr_softc *sc, uint8_t *reply)
2242 {
2243 MPI2DefaultReply_t *mpi_reply;
2244 u16 sc_status;
2245
2246 mpi_reply = (MPI2DefaultReply_t*)reply;
2247 sc_status = le16toh(mpi_reply->IOCStatus);
2248 if (sc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
2249 mpr_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo));
2250 }
2251
2252 void
2253 mpr_intr(void *data)
2254 {
2255 struct mpr_softc *sc;
2256 uint32_t status;
2257
2258 sc = (struct mpr_softc *)data;
2259 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
2260
2261 /*
2262 * Check interrupt status register to flush the bus. This is
2263 * needed for both INTx interrupts and driver-driven polling
2264 	 * needed for both INTx interrupts and driver-driven polling.
2265 status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
2266 if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0)
2267 return;
2268
2269 mpr_lock(sc);
2270 mpr_intr_locked(data);
2271 mpr_unlock(sc);
2272 return;
2273 }
2274
2275 /*
2276 * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the
2277 * chip. Hopefully this theory is correct.
2278 */
2279 void
2280 mpr_intr_msi(void *data)
2281 {
2282 struct mpr_softc *sc;
2283
2284 sc = (struct mpr_softc *)data;
2285 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
2286 mpr_lock(sc);
2287 mpr_intr_locked(data);
2288 mpr_unlock(sc);
2289 return;
2290 }
2291
2292 /*
2293 * The locking is overly broad and simplistic, but easy to deal with for now.
2294 */
2295 void
2296 mpr_intr_locked(void *data)
2297 {
2298 MPI2_REPLY_DESCRIPTORS_UNION *desc;
2299 struct mpr_softc *sc;
2300 struct mpr_command *cm = NULL;
2301 uint8_t flags;
2302 u_int pq;
2303 MPI2_DIAG_RELEASE_REPLY *rel_rep;
2304 mpr_fw_diagnostic_buffer_t *pBuffer;
2305
2306 sc = (struct mpr_softc *)data;
2307
2308 pq = sc->replypostindex;
2309 mpr_dprint(sc, MPR_TRACE,
2310 "%s sc %p starting with replypostindex %u\n",
2311 __func__, sc, sc->replypostindex);
2312
2313 for ( ;; ) {
2314 cm = NULL;
2315 desc = &sc->post_queue[sc->replypostindex];
2316 flags = desc->Default.ReplyFlags &
2317 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2318 if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) ||
2319 (le32toh(desc->Words.High) == 0xffffffff))
2320 break;
2321
2322 /* increment the replypostindex now, so that event handlers
2323 * and cm completion handlers which decide to do a diag
2324 * reset can zero it without it getting incremented again
2325 * afterwards, and we break out of this loop on the next
2326 * iteration since the reply post queue has been cleared to
2327 * 0xFF and all descriptors look unused (which they are).
2328 */
2329 if (++sc->replypostindex >= sc->pqdepth)
2330 sc->replypostindex = 0;
2331
2332 switch (flags) {
2333 case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS:
2334 case MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS:
2335 case MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS:
2336 cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)];
2337 KASSERT(cm->cm_state == MPR_CM_STATE_INQUEUE,
2338 ("command not inqueue\n"));
2339 cm->cm_state = MPR_CM_STATE_BUSY;
2340 cm->cm_reply = NULL;
2341 break;
2342 case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY:
2343 {
2344 uint32_t baddr;
2345 uint8_t *reply;
2346
2347 /*
2348 * Re-compose the reply address from the address
2349 * sent back from the chip. The ReplyFrameAddress
2350 * is the lower 32 bits of the physical address of
2351 			 * the particular reply frame. Convert that address to
2352 * host format, and then use that to provide the
2353 * offset against the virtual address base
2354 * (sc->reply_frames).
2355 */
2356 baddr = le32toh(desc->AddressReply.ReplyFrameAddress);
2357 reply = sc->reply_frames +
2358 (baddr - ((uint32_t)sc->reply_busaddr));
2359 /*
2360 * Make sure the reply we got back is in a valid
2361 * range. If not, go ahead and panic here, since
2362 			 * we'll probably panic as soon as we dereference the
2363 * reply pointer anyway.
2364 */
2365 if ((reply < sc->reply_frames)
2366 || (reply > (sc->reply_frames +
2367 (sc->fqdepth * sc->replyframesz)))) {
2368 kprintf("%s: WARNING: reply %p out of range!\n",
2369 __func__, reply);
2370 kprintf("%s: reply_frames %p, fqdepth %d, "
2371 "frame size %d\n", __func__,
2372 sc->reply_frames, sc->fqdepth,
2373 sc->replyframesz);
2374 kprintf("%s: baddr %#x,\n", __func__, baddr);
2375 /* LSI-TODO. See Linux Code for Graceful exit */
2376 panic("Reply address out of range");
2377 }
2378 if (le16toh(desc->AddressReply.SMID) == 0) {
2379 if (((MPI2_DEFAULT_REPLY *)reply)->Function ==
2380 MPI2_FUNCTION_DIAG_BUFFER_POST) {
2381 /*
2382 * If SMID is 0 for Diag Buffer Post,
2383 * this implies that the reply is due to
2384 * a release function with a status that
2385 * the buffer has been released. Set
2386 * the buffer flags accordingly.
2387 */
2388 rel_rep =
2389 (MPI2_DIAG_RELEASE_REPLY *)reply;
2390 if ((le16toh(rel_rep->IOCStatus) &
2391 MPI2_IOCSTATUS_MASK) ==
2392 MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED)
2393 {
2394 pBuffer =
2395 &sc->fw_diag_buffer_list[
2396 rel_rep->BufferType];
2397 pBuffer->valid_data = TRUE;
2398 pBuffer->owned_by_firmware =
2399 FALSE;
2400 pBuffer->immediate = FALSE;
2401 }
2402 } else
2403 mpr_dispatch_event(sc, baddr,
2404 (MPI2_EVENT_NOTIFICATION_REPLY *)
2405 reply);
2406 } else {
2407 cm = &sc->commands[
2408 le16toh(desc->AddressReply.SMID)];
2409 KASSERT(cm->cm_state == MPR_CM_STATE_INQUEUE,
2410 ("command not inqueue\n"));
2411 cm->cm_state = MPR_CM_STATE_BUSY;
2412 cm->cm_reply = reply;
2413 cm->cm_reply_data =
2414 le32toh(desc->AddressReply.
2415 ReplyFrameAddress);
2416 }
2417 break;
2418 }
2419 case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS:
2420 case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER:
2421 case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS:
2422 default:
2423 /* Unhandled */
2424 mpr_dprint(sc, MPR_ERROR, "Unhandled reply 0x%x\n",
2425 desc->Default.ReplyFlags);
2426 cm = NULL;
2427 break;
2428 }
2429
2430 if (cm != NULL) {
2431 // Print Error reply frame
2432 if (cm->cm_reply)
2433 mpr_display_reply_info(sc,cm->cm_reply);
2434 mpr_complete_command(sc, cm);
2435 }
2436
2437 desc->Words.Low = 0xffffffff;
2438 desc->Words.High = 0xffffffff;
2439 }
2440
2441 if (pq != sc->replypostindex) {
2442 mpr_dprint(sc, MPR_TRACE, "%s sc %p writing postindex %d\n",
2443 __func__, sc, sc->replypostindex);
2444 mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET,
2445 sc->replypostindex);
2446 }
2447
2448 return;
2449 }
2450
2451 static void
2452 mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data,
2453 MPI2_EVENT_NOTIFICATION_REPLY *reply)
2454 {
2455 struct mpr_event_handle *eh;
2456 int event, handled = 0;
2457
2458 event = le16toh(reply->Event);
2459 TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
2460 if (isset(eh->mask, event)) {
2461 eh->callback(sc, data, reply);
2462 handled++;
2463 }
2464 }
2465
2466 if (handled == 0)
2467 mpr_dprint(sc, MPR_EVENT, "Unhandled event 0x%x\n",
2468 		    event);
2469
2470 /*
2471 * This is the only place that the event/reply should be freed.
2472 * Anything wanting to hold onto the event data should have
2473 * already copied it into their own storage.
2474 */
2475 mpr_free_reply(sc, data);
2476 }
2477
2478 static void
2479 mpr_reregister_events_complete(struct mpr_softc *sc, struct mpr_command *cm)
2480 {
2481 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
2482
2483 if (cm->cm_reply)
2484 MPR_DPRINT_EVENT(sc, generic,
2485 (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply);
2486
2487 mpr_free_command(sc, cm);
2488
2489 /* next, send a port enable */
2490 mprsas_startup(sc);
2491 }
2492
2493 /*
2494 * For both register_events and update_events, the caller supplies a bitmap
2495 * of events that it _wants_. These functions then turn that into a bitmask
2496 * suitable for the controller.
2497 */
2498 int
2499 mpr_register_events(struct mpr_softc *sc, uint8_t *mask,
2500 mpr_evt_callback_t *cb, void *data, struct mpr_event_handle **handle)
2501 {
2502 struct mpr_event_handle *eh;
2503 int error = 0;
2504
2505 eh = kmalloc(sizeof(struct mpr_event_handle), M_MPR, M_WAITOK|M_ZERO);
2506 if (!eh) {
2507 mpr_dprint(sc, MPR_EVENT|MPR_ERROR,
2508 "Cannot allocate event memory\n");
2509 return (ENOMEM);
2510 }
2511 eh->callback = cb;
2512 eh->data = data;
2513 TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list);
2514 if (mask != NULL)
2515 error = mpr_update_events(sc, eh, mask);
2516 *handle = eh;
2517
2518 return (error);
2519 }
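/*
 * A minimal registration sketch (hypothetical names, example only): the
 * 16-byte mask is a bitmap indexed by MPI2_EVENT_* number, exactly as in
 * mpr_attach_log() above.
 */
#if 0
static struct mpr_event_handle *example_eh;

static void
example_evt_cb(struct mpr_softc *sc, uintptr_t data,
    MPI2_EVENT_NOTIFICATION_REPLY *event)
{
	mpr_dprint(sc, MPR_EVENT, "saw event 0x%x\n", le16toh(event->Event));
}

static void
example_register(struct mpr_softc *sc)
{
	uint8_t events[16];

	bzero(events, 16);
	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
	mpr_register_events(sc, events, example_evt_cb, NULL, &example_eh);
}
#endif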
2520
2521 int
2522 mpr_update_events(struct mpr_softc *sc, struct mpr_event_handle *handle,
2523 uint8_t *mask)
2524 {
2525 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
2526 MPI2_EVENT_NOTIFICATION_REPLY *reply = NULL;
2527 struct mpr_command *cm = NULL;
2528 struct mpr_event_handle *eh;
2529 int error, i;
2530
2531 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
2532
2533 if ((mask != NULL) && (handle != NULL))
2534 bcopy(mask, &handle->mask[0], 16);
2535 memset(sc->event_mask, 0xff, 16);
2536
2537 TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
2538 for (i = 0; i < 16; i++)
2539 sc->event_mask[i] &= ~eh->mask[i];
2540 }
2541
2542 if ((cm = mpr_alloc_command(sc)) == NULL)
2543 return (EBUSY);
2544 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
2545 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
2546 evtreq->MsgFlags = 0;
2547 evtreq->SASBroadcastPrimitiveMasks = 0;
2548 #ifdef MPR_DEBUG_ALL_EVENTS
2549 {
2550 u_char fullmask[16];
2551 memset(fullmask, 0x00, 16);
2552 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16);
2553 }
2554 #else
2555 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16);
2556 #endif
2557 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2558 cm->cm_data = NULL;
2559
2560 error = mpr_request_polled(sc, &cm);
2561 if (cm != NULL)
2562 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply;
2563 if ((reply == NULL) ||
2564 (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
2565 error = ENXIO;
2566
2567 if (reply)
2568 MPR_DPRINT_EVENT(sc, generic, reply);
2569
2570 mpr_dprint(sc, MPR_TRACE, "%s finished error %d\n", __func__, error);
2571
2572 if (cm != NULL)
2573 mpr_free_command(sc, cm);
2574 return (error);
2575 }
2576
2577 static int
2578 mpr_reregister_events(struct mpr_softc *sc)
2579 {
2580 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
2581 struct mpr_command *cm;
2582 struct mpr_event_handle *eh;
2583 int error, i;
2584
2585 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
2586
2587 /* first, reregister events */
2588
2589 memset(sc->event_mask, 0xff, 16);
2590
2591 TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
2592 for (i = 0; i < 16; i++)
2593 sc->event_mask[i] &= ~eh->mask[i];
2594 }
2595
2596 if ((cm = mpr_alloc_command(sc)) == NULL)
2597 return (EBUSY);
2598 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
2599 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
2600 evtreq->MsgFlags = 0;
2601 evtreq->SASBroadcastPrimitiveMasks = 0;
2602 #ifdef MPR_DEBUG_ALL_EVENTS
2603 {
2604 u_char fullmask[16];
2605 memset(fullmask, 0x00, 16);
2606 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16);
2607 }
2608 #else
2609 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16);
2610 #endif
2611 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2612 cm->cm_data = NULL;
2613 cm->cm_complete = mpr_reregister_events_complete;
2614
2615 error = mpr_map_command(sc, cm);
2616
2617 mpr_dprint(sc, MPR_TRACE, "%s finished with error %d\n", __func__,
2618 error);
2619 return (error);
2620 }
2621
2622 int
2623 mpr_deregister_events(struct mpr_softc *sc, struct mpr_event_handle *handle)
2624 {
2625
2626 TAILQ_REMOVE(&sc->event_list, handle, eh_list);
2627 kfree(handle, M_MPR);
2628 return (mpr_update_events(sc, NULL, NULL));
2629 }
2630
2631 /**
2632 * mpr_build_nvme_prp - This function is called for NVMe end devices to build a
2633 * native SGL (NVMe PRP). The native SGL is built starting in the first PRP entry
2634 * of the NVMe message (PRP1). If the data buffer is small enough to be described
2635 * entirely using PRP1, then PRP2 is not used. If needed, PRP2 is used to
2636 * describe a larger data buffer. If the data buffer is too large to describe
2637  * using the two PRP entries inside the NVMe message, then PRP1 describes the
2638 * first data memory segment, and PRP2 contains a pointer to a PRP list located
2639 * elsewhere in memory to describe the remaining data memory segments. The PRP
2640 * list will be contiguous.
2641  *
2642 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
2643  * consists of a list of PRP entries to describe a number of noncontiguous
2644 * physical memory segments as a single memory buffer, just as a SGL does. Note
2645 * however, that this function is only used by the IOCTL call, so the memory
2646 * given will be guaranteed to be contiguous. There is no need to translate
2647 * non-contiguous SGL into a PRP in this case. All PRPs will describe contiguous
2648 * space that is one page size each.
2649 *
2650 * Each NVMe message contains two PRP entries. The first (PRP1) either contains
2651 * a PRP list pointer or a PRP element, depending upon the command. PRP2 contains
2652 * the second PRP element if the memory being described fits within 2 PRP
2653 * entries, or a PRP list pointer if the PRP spans more than two entries.
2654 *
2655 * A PRP list pointer contains the address of a PRP list, structured as a linear
2656 * array of PRP entries. Each PRP entry in this list describes a segment of
2657 * physical memory.
2658 *
2659 * Each 64-bit PRP entry comprises an address and an offset field. The address
2660 * always points to the beginning of a PAGE_SIZE physical memory page, and the
2661 * offset describes where within that page the memory segment begins. Only the
2662  * first element in a PRP list may contain a non-zero offset, implying that all
2663 * memory segments following the first begin at the start of a PAGE_SIZE page.
2664 *
2665  * Each PRP element normally describes a chunk of PAGE_SIZE physical memory,
2666 * with exceptions for the first and last elements in the list. If the memory
2667 * being described by the list begins at a non-zero offset within the first page,
2668 * then the first PRP element will contain a non-zero offset indicating where the
2669 * region begins within the page. The last memory segment may end before the end
2670 * of the PAGE_SIZE segment, depending upon the overall size of the memory being
2671 * described by the PRP list.
2672 *
2673 * Since PRP entries lack any indication of size, the overall data buffer length
2674 * is used to determine where the end of the data memory buffer is located, and
2675 * how many PRP entries are required to describe it.
2676 *
2677 * Returns nothing.
2678 */
2679 void
2680 mpr_build_nvme_prp(struct mpr_softc *sc, struct mpr_command *cm,
2681 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request, void *data,
2682 uint32_t data_in_sz, uint32_t data_out_sz)
2683 {
2684 int prp_size = PRP_ENTRY_SIZE;
2685 uint64_t *prp_entry, *prp1_entry, *prp2_entry;
2686 uint64_t *prp_entry_phys, *prp_page, *prp_page_phys;
2687 uint32_t offset, entry_len, page_mask_result, page_mask;
2688 bus_addr_t paddr;
2689 size_t length;
2690 struct mpr_prp_page *prp_page_info = NULL;
2691
2692 /*
2693 * Not all commands require a data transfer. If no data, just return
2694 * without constructing any PRP.
2695 */
2696 if (!data_in_sz && !data_out_sz)
2697 return;
2698
2699 /*
2700 * Set pointers to PRP1 and PRP2, which are in the NVMe command. PRP1 is
2701 * located at a 24 byte offset from the start of the NVMe command. Then
2702 * set the current PRP entry pointer to PRP1.
2703 */
2704 prp1_entry = (uint64_t *)(nvme_encap_request->NVMe_Command +
2705 NVME_CMD_PRP1_OFFSET);
2706 prp2_entry = (uint64_t *)(nvme_encap_request->NVMe_Command +
2707 NVME_CMD_PRP2_OFFSET);
2708 prp_entry = prp1_entry;
2709
2710 /*
2711 * For the PRP entries, use the specially allocated buffer of
2712 * contiguous memory. PRP Page allocation failures should not happen
2713 * because there should be enough PRP page buffers to account for the
2714 * possible NVMe QDepth.
2715 */
2716 prp_page_info = mpr_alloc_prp_page(sc);
2717 KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be "
2718 "used for building a native NVMe SGL.\n", __func__));
2719 prp_page = (uint64_t *)prp_page_info->prp_page;
2720 prp_page_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr;
2721
2722 /*
2723 * Insert the allocated PRP page into the command's PRP page list. This
2724 * will be freed when the command is freed.
2725 */
2726 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
2727
2728 /*
2729 	 * Check if we are within 1 entry of a page boundary; we don't want
2730 	 * our first entry to be a PRP List entry.
2731 */
2732 page_mask = PAGE_SIZE - 1;
2733 page_mask_result = (uintptr_t)((uint8_t *)prp_page + prp_size) &
2734 page_mask;
2735 if (!page_mask_result)
2736 {
2737 /* Bump up to next page boundary. */
2738 prp_page = (uint64_t *)((uint8_t *)prp_page + prp_size);
2739 prp_page_phys = (uint64_t *)((uint8_t *)prp_page_phys +
2740 prp_size);
2741 }
2742
2743 /*
2744 * Set PRP physical pointer, which initially points to the current PRP
2745 * DMA memory page.
2746 */
2747 prp_entry_phys = prp_page_phys;
2748
2749 /* Get physical address and length of the data buffer. */
2750 paddr = (bus_addr_t)(uintptr_t)data;
2751 if (data_in_sz)
2752 length = data_in_sz;
2753 else
2754 length = data_out_sz;
2755
2756 /* Loop while the length is not zero. */
2757 while (length)
2758 {
2759 /*
2760 * Check if we need to put a list pointer here if we are at page
2761 * boundary - prp_size (8 bytes).
2762 */
2763 page_mask_result = (uintptr_t)((uint8_t *)prp_entry_phys +
2764 prp_size) & page_mask;
2765 if (!page_mask_result)
2766 {
2767 /*
2768 * This is the last entry in a PRP List, so we need to
2769 * put a PRP list pointer here. What this does is:
2770 * - bump the current memory pointer to the next
2771 * address, which will be the next full page.
2772 * - set the PRP Entry to point to that page. This is
2773 * now the PRP List pointer.
2774 			 * - bump the PRP Entry pointer to the start of the next
2775 * page. Since all of this PRP memory is contiguous,
2776 * no need to get a new page - it's just the next
2777 * address.
2778 */
2779 prp_entry_phys++;
2780 *prp_entry =
2781 htole64((uint64_t)(uintptr_t)prp_entry_phys);
2782 prp_entry++;
2783 }
2784
2785 /* Need to handle if entry will be part of a page. */
2786 offset = (uint32_t)paddr & page_mask;
2787 entry_len = PAGE_SIZE - offset;
2788
2789 if (prp_entry == prp1_entry)
2790 {
2791 /*
2792 * Must fill in the first PRP pointer (PRP1) before
2793 * moving on.
2794 */
2795 *prp1_entry = htole64((uint64_t)paddr);
2796
2797 /*
2798 * Now point to the second PRP entry within the
2799 * command (PRP2).
2800 */
2801 prp_entry = prp2_entry;
2802 }
2803 else if (prp_entry == prp2_entry)
2804 {
2805 /*
2806 * Should the PRP2 entry be a PRP List pointer or just a
2807 * regular PRP pointer? If there is more than one more
2808 * page of data, must use a PRP List pointer.
2809 */
2810 if (length > PAGE_SIZE)
2811 {
2812 /*
2813 * PRP2 will contain a PRP List pointer because
2814 * more PRP's are needed with this command. The
2815 * list will start at the beginning of the
2816 * contiguous buffer.
2817 */
2818 *prp2_entry =
2819 htole64(
2820 (uint64_t)(uintptr_t)prp_entry_phys);
2821
2822 /*
2823 * The next PRP Entry will be the start of the
2824 * first PRP List.
2825 */
2826 prp_entry = prp_page;
2827 }
2828 else
2829 {
2830 /*
2831 * After this, the PRP Entries are complete.
2832 * This command uses 2 PRP's and no PRP list.
2833 */
2834 *prp2_entry = htole64((uint64_t)paddr);
2835 }
2836 }
2837 else
2838 {
2839 /*
2840 * Put entry in list and bump the addresses.
2841 *
2842 * After PRP1 and PRP2 are filled in, this will fill in
2843 * all remaining PRP entries in a PRP List, one per each
2844 * time through the loop.
2845 */
2846 *prp_entry = htole64((uint64_t)paddr);
2847 prp_entry++;
2848 prp_entry_phys++;
2849 }
2850
2851 /*
2852 * Bump the phys address of the command's data buffer by the
2853 * entry_len.
2854 */
2855 paddr += entry_len;
2856
2857 /* Decrement length accounting for last partial page. */
2858 if (entry_len > length)
2859 length = 0;
2860 else
2861 length -= entry_len;
2862 }
2863 }
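/*
 * Worked example of the loop above (hypothetical transfer, PAGE_SIZE
 * 4096): a 12KB buffer starting at offset 0x200 within its first page
 * needs four data PRP entries. PRP1 takes the buffer address directly
 * (entry_len = 4096 - 0x200 = 0xE00); since more than one page then
 * remains, PRP2 becomes a PRP list pointer, and the list holds three
 * page-aligned entries covering the remaining 0x2200 bytes.
 */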
2864
2865 /*
2866 * mpr_check_pcie_native_sgl - This function is called for PCIe end devices to
2867 * determine if the driver needs to build a native SGL. If so, that native SGL
2868 * is built in the contiguous buffers allocated especially for PCIe SGL
2869 * creation. If the driver will not build a native SGL, return TRUE and a
2870 * normal IEEE SGL will be built. Currently this routine supports NVMe devices
2871 * only.
2872 *
2873 * Returns FALSE (0) if native SGL was built, TRUE (1) if no SGL was built.
2874 */
2875 static int
2876 mpr_check_pcie_native_sgl(struct mpr_softc *sc, struct mpr_command *cm,
2877 bus_dma_segment_t *segs, int segs_left)
2878 {
2879 uint32_t i, sge_dwords, length, offset, entry_len;
2880 uint32_t num_entries, buff_len = 0, sges_in_segment;
2881 uint32_t page_mask, page_mask_result, *curr_buff;
2882 uint32_t *ptr_sgl, *ptr_first_sgl, first_page_offset;
2883 uint32_t first_page_data_size, end_residual;
2884 uint64_t *msg_phys;
2885 bus_addr_t paddr;
2886 int build_native_sgl = 0, first_prp_entry;
2887 int prp_size = PRP_ENTRY_SIZE;
2888 Mpi25IeeeSgeChain64_t *main_chain_element = NULL;
2889 struct mpr_prp_page *prp_page_info = NULL;
2890
2891 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
2892
2893 /*
2894 * Add up the sizes of each segment length to get the total transfer
2895 * size, which will be checked against the Maximum Data Transfer Size.
2896 * If the data transfer length exceeds the MDTS for this device, just
2897 * return 1 so a normal IEEE SGL will be built. F/W will break the I/O
2898 * up into multiple I/O's. [nvme_mdts = 0 means unlimited]
2899 */
2900 for (i = 0; i < segs_left; i++)
2901 buff_len += htole32(segs[i].ds_len);
2902 if ((cm->cm_targ->MDTS > 0) && (buff_len > cm->cm_targ->MDTS))
2903 return 1;
2904
2905 /* Create page_mask (to get offset within page) */
2906 page_mask = PAGE_SIZE - 1;
2907
2908 /*
2909 * Check if the number of elements exceeds the max number that can be
2910 * put in the main message frame (H/W can only translate an SGL that
2911 * is contained entirely in the main message frame).
2912 */
2913 sges_in_segment = (sc->reqframesz -
2914 offsetof(Mpi25SCSIIORequest_t, SGL)) / sizeof(MPI25_SGE_IO_UNION);
2915 if (segs_left > sges_in_segment)
2916 build_native_sgl = 1;
2917 else
2918 {
2919 /*
2920 * NVMe uses one PRP for each physical page (or part of physical
2921 * page).
2922 * if 4 pages or less then IEEE is OK
2923 * if > 5 pages then we need to build a native SGL
2924 * if > 4 and <= 5 pages, then check the physical address of
2925 * the first SG entry, then if this first size in the page
2926 * is >= the residual beyond 4 pages then use IEEE,
2927 * otherwise use native SGL
2928 */
2929 if (buff_len > (PAGE_SIZE * 5))
2930 build_native_sgl = 1;
2931 else if ((buff_len > (PAGE_SIZE * 4)) &&
2932 (buff_len <= (PAGE_SIZE * 5)) )
2933 {
2934 msg_phys = (uint64_t *)(uintptr_t)segs[0].ds_addr;
2935 first_page_offset =
2936 ((uint32_t)(uint64_t)(uintptr_t)msg_phys &
2937 page_mask);
2938 first_page_data_size = PAGE_SIZE - first_page_offset;
2939 end_residual = buff_len % PAGE_SIZE;
2940
2941 /*
2942 * If offset into first page pushes the end of the data
2943 * beyond end of the 5th page, we need the extra PRP
2944 * list.
2945 */
2946 if (first_page_data_size < end_residual)
2947 build_native_sgl = 1;
2948
2949 /*
2950 * Check if first SG entry size is < residual beyond 4
2951 * pages.
2952 */
2953 if (htole32(segs[0].ds_len) <
2954 (buff_len - (PAGE_SIZE * 4)))
2955 build_native_sgl = 1;
2956 }
2957 }
2958
2959 /* check if native SGL is needed */
2960 if (!build_native_sgl)
2961 return 1;
2962
2963 /*
2964 * Native SGL is needed.
2965 * Put a chain element in main message frame that points to the first
2966 * chain buffer.
2967 *
2968 * NOTE: The ChainOffset field must be 0 when using a chain pointer to
2969 * a native SGL.
2970 */
2971
2972 /* Set main message chain element pointer */
2973 main_chain_element = (pMpi25IeeeSgeChain64_t)cm->cm_sge;
2974
2975 /*
2976 * For NVMe the chain element needs to be the 2nd SGL entry in the main
2977 * message.
2978 */
2979 main_chain_element = (Mpi25IeeeSgeChain64_t *)
2980 ((uint8_t *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
2981
2982 /*
2983 * For the PRP entries, use the specially allocated buffer of
2984 * contiguous memory. PRP Page allocation failures should not happen
2985 * because there should be enough PRP page buffers to account for the
2986 * possible NVMe QDepth.
2987 */
2988 prp_page_info = mpr_alloc_prp_page(sc);
2989 KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be "
2990 "used for building a native NVMe SGL.\n", __func__));
2991 curr_buff = (uint32_t *)prp_page_info->prp_page;
2992 msg_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr;
2993
2994 /*
2995 * Insert the allocated PRP page into the command's PRP page list. This
2996 * will be freed when the command is freed.
2997 */
2998 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
2999
3000 /*
3001 	 * Check if we are within 1 entry of a page boundary; we don't want
3002 	 * our first entry to be a PRP List entry.
3003 */
3004 page_mask_result = (uintptr_t)((uint8_t *)curr_buff + prp_size) &
3005 page_mask;
3006 if (!page_mask_result) {
3007 /* Bump up to next page boundary. */
3008 curr_buff = (uint32_t *)((uint8_t *)curr_buff + prp_size);
3009 msg_phys = (uint64_t *)((uint8_t *)msg_phys + prp_size);
3010 }
3011
3012 /* Fill in the chain element and make it an NVMe segment type. */
3013 main_chain_element->Address.High =
3014 htole32((uint32_t)((uint64_t)(uintptr_t)msg_phys >> 32));
3015 main_chain_element->Address.Low =
3016 htole32((uint32_t)(uintptr_t)msg_phys);
3017 main_chain_element->NextChainOffset = 0;
3018 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3019 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
3020 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
3021
3022 /* Set SGL pointer to start of contiguous PCIe buffer. */
3023 ptr_sgl = curr_buff;
3024 sge_dwords = 2;
3025 num_entries = 0;
3026
3027 /*
3028 * NVMe has a very convoluted PRP format. One PRP is required for each
3029 * page or partial page. We need to split up OS SG entries if they are
3030 * longer than one page or cross a page boundary. We also have to insert
3031 * a PRP list pointer entry as the last entry in each physical page of
3032 * the PRP list.
3033 *
3034 * NOTE: The first PRP "entry" is actually placed in the first SGL entry
3035 * in the main message in IEEE 64 format. The 2nd entry in the main
3036 * message is the chain element, and the rest of the PRP entries are
3037 * built in the contiguous PCIe buffer.
3038 */
3039 first_prp_entry = 1;
3040 ptr_first_sgl = (uint32_t *)cm->cm_sge;
3041
3042 for (i = 0; i < segs_left; i++) {
3043 /* Get physical address and length of this SG entry. */
3044 paddr = segs[i].ds_addr;
3045 length = segs[i].ds_len;
3046
3047 /*
3048 		 * Check whether a given SGE buffer starts at a non-page-aligned
3049 		 * address when it is not the first SGE. This is unexpected, so
3050 		 * fall back and have the FW build an IEEE SGL.
3051 */
3052 if ((i != 0) && (((uint32_t)paddr & page_mask) != 0)) {
3053 mpr_dprint(sc, MPR_ERROR, "Unaligned SGE while "
3054 "building NVMe PRPs, low address is 0x%x\n",
3055 (uint32_t)paddr);
3056 return 1;
3057 }
3058
3059 		/* Apart from the last SGE, if any other SGE boundary is not
3060 		 * page aligned then a hole exists in the buffer. Such a hole
3061 		 * would lead to data corruption, so fall back to IEEE SGEs.
3062 */
3063 if (i != (segs_left - 1)) {
3064 if (((uint32_t)paddr + length) & page_mask) {
3065 mpr_dprint(sc, MPR_ERROR, "Unaligned SGE "
3066 "boundary while building NVMe PRPs, low "
3067 "address: 0x%x and length: %u\n",
3068 (uint32_t)paddr, length);
3069 return 1;
3070 }
3071 }
3072
3073 /* Loop while the length is not zero. */
3074 while (length) {
3075 /*
3076 * Check if we need to put a list pointer here if we are
3077 * at page boundary - prp_size.
3078 */
3079 page_mask_result = (uintptr_t)((uint8_t *)ptr_sgl +
3080 prp_size) & page_mask;
3081 if (!page_mask_result) {
3082 /*
3083 * Need to put a PRP list pointer here.
3084 */
3085 msg_phys = (uint64_t *)((uint8_t *)msg_phys +
3086 prp_size);
3087 *ptr_sgl = htole32((uintptr_t)msg_phys);
3088 *(ptr_sgl+1) = htole32((uint64_t)(uintptr_t)
3089 msg_phys >> 32);
3090 ptr_sgl += sge_dwords;
3091 num_entries++;
3092 }
3093
3094 /* Need to handle if entry will be part of a page. */
3095 offset = (uint32_t)paddr & page_mask;
3096 entry_len = PAGE_SIZE - offset;
3097 if (first_prp_entry) {
3098 /*
3099 * Put IEEE entry in first SGE in main message.
3100 * (Simple element, System addr, not end of
3101 * list.)
3102 */
3103 *ptr_first_sgl = htole32((uint32_t)paddr);
3104 *(ptr_first_sgl + 1) =
3105 htole32((uint32_t)((uint64_t)paddr >> 32));
3106 *(ptr_first_sgl + 2) = htole32(entry_len);
3107 *(ptr_first_sgl + 3) = 0;
3108
3109 /* No longer the first PRP entry. */
3110 first_prp_entry = 0;
3111 } else {
3112 /* Put entry in list. */
3113 *ptr_sgl = htole32((uint32_t)paddr);
3114 *(ptr_sgl + 1) =
3115 htole32((uint32_t)((uint64_t)paddr >> 32));
3116
3117 /* Bump ptr_sgl, msg_phys, and num_entries. */
3118 ptr_sgl += sge_dwords;
3119 msg_phys = (uint64_t *)((uint8_t *)msg_phys +
3120 prp_size);
3121 num_entries++;
3122 }
3123
3124 /* Bump the phys address by the entry_len. */
3125 paddr += entry_len;
3126
3127 /* Decrement length accounting for last partial page. */
3128 if (entry_len > length)
3129 length = 0;
3130 else
3131 length -= entry_len;
3132 }
3133 }
3134
3135 /* Set chain element Length. */
3136 main_chain_element->Length = htole32(num_entries * prp_size);
3137
3138 /* Return 0, indicating we built a native SGL. */
3139 return 0;
3140 }
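/*
 * Worked example of the 4-to-5-page window above (hypothetical values,
 * PAGE_SIZE 4096): an 18000-byte transfer whose first segment starts at
 * page offset 0xF00 gives first_page_data_size = 4096 - 0xF00 = 256 and
 * end_residual = 18000 % 4096 = 1616; since 256 < 1616 the data spills
 * past the fifth page and a native SGL is built.
 */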
3141
3142 /*
3143 * Add a chain element as the next SGE for the specified command.
3144 * Reset cm_sge and cm_sgesize to indicate all the available space. Chains are
3145 * only required for IEEE commands. Therefore there is no code for commands
3146 * that have the MPR_CM_FLAGS_SGE_SIMPLE flag set (and those commands
3147 * shouldn't be requesting chains).
3148 */
3149 static int
3150 mpr_add_chain(struct mpr_command *cm, int segsleft)
3151 {
3152 struct mpr_softc *sc = cm->cm_sc;
3153 MPI2_REQUEST_HEADER *req;
3154 MPI25_IEEE_SGE_CHAIN64 *ieee_sgc;
3155 struct mpr_chain *chain;
3156 int sgc_size, current_segs, rem_segs, segs_per_frame;
3157 uint8_t next_chain_offset = 0;
3158
3159 /*
3160 * Fail if a command is requesting a chain for SIMPLE SGE's. For SAS3
3161 * only IEEE commands should be requesting chains. Return some error
3162 * code other than 0.
3163 */
3164 if (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE) {
3165 mpr_dprint(sc, MPR_ERROR, "A chain element cannot be added to "
3166 "an MPI SGL.\n");
3167 return(ENOBUFS);
3168 }
3169
3170 sgc_size = sizeof(MPI25_IEEE_SGE_CHAIN64);
3171 if (cm->cm_sglsize < sgc_size)
3172 panic("MPR: Need SGE Error Code\n");
3173
3174 chain = mpr_alloc_chain(cm->cm_sc);
3175 if (chain == NULL)
3176 return (ENOBUFS);
3177
3178 /*
3179 * Note: a double-linked list is used to make it easier to walk for
3180 * debugging.
3181 */
3182 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link);
3183
3184 /*
3185 	 * Need to know whether more than one frame is left. If more than one
3186 	 * frame is required, NextChainOffset must be set; it points at the
3187 	 * last segment of the frame.
3188 */
3189 rem_segs = 0;
3190 if (cm->cm_sglsize < (sgc_size * segsleft)) {
3191 /*
3192 		 * rem_segs is the number of segments remaining after the
3193 * segments that will go into the current frame. Since it is
3194 * known that at least one more frame is required, account for
3195 * the chain element. To know if more than one more frame is
3196 * required, just check if there will be a remainder after using
3197 * the current frame (with this chain) and the next frame. If
3198 * so the NextChainOffset must be the last element of the next
3199 * frame.
3200 */
3201 current_segs = (cm->cm_sglsize / sgc_size) - 1;
3202 rem_segs = segsleft - current_segs;
3203 segs_per_frame = sc->chain_frame_size / sgc_size;
3204 if (rem_segs > segs_per_frame) {
3205 next_chain_offset = segs_per_frame - 1;
3206 }
3207 }
3208 ieee_sgc = &((MPI25_SGE_IO_UNION *)cm->cm_sge)->IeeeChain;
3209 ieee_sgc->Length = next_chain_offset ?
3210 htole32((uint32_t)sc->chain_frame_size) :
3211 htole32((uint32_t)rem_segs * (uint32_t)sgc_size);
3212 ieee_sgc->NextChainOffset = next_chain_offset;
3213 ieee_sgc->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3214 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
3215 ieee_sgc->Address.Low = htole32(chain->chain_busaddr);
3216 ieee_sgc->Address.High = htole32(chain->chain_busaddr >> 32);
3217 cm->cm_sge = &((MPI25_SGE_IO_UNION *)chain->chain)->IeeeSimple;
3218 req = (MPI2_REQUEST_HEADER *)cm->cm_req;
3219 req->ChainOffset = (sc->chain_frame_size - sgc_size) >> 4;
3220
3221 cm->cm_sglsize = sc->chain_frame_size;
3222 return (0);
3223 }
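/*
 * Example of the offset math above (hypothetical sizes): with a
 * chain_frame_size of 128 bytes and 16-byte IEEE chain elements,
 * req->ChainOffset = (128 - 16) >> 4 = 7, i.e. the chain element sits in
 * the last of the frame's eight 16-byte slots, which is also the value
 * next_chain_offset takes when yet another frame will be needed.
 */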
3224
3225 /*
3226 * Add one scatter-gather element to the scatter-gather list for a command.
3227 * Maintain cm_sglsize and cm_sge as the remaining size and pointer to the
3228 * next SGE to fill in, respectively. In Gen3, the MPI SGL does not have a
3229 * chain, so don't consider any chain additions.
3230 */
3231 int
3232 mpr_push_sge(struct mpr_command *cm, MPI2_SGE_SIMPLE64 *sge, size_t len,
3233 int segsleft)
3234 {
3235 uint32_t saved_buf_len, saved_address_low, saved_address_high;
3236 u32 sge_flags;
3237
3238 /*
3239 * case 1: >=1 more segment, no room for anything (error)
3240 * case 2: 1 more segment and enough room for it
3241 */
3242
3243 if (cm->cm_sglsize < (segsleft * sizeof(MPI2_SGE_SIMPLE64))) {
3244 mpr_dprint(cm->cm_sc, MPR_ERROR,
3245 "%s: warning: Not enough room for MPI SGL in frame.\n",
3246 __func__);
3247 return(ENOBUFS);
3248 }
3249
3250 KASSERT(segsleft == 1,
3251 ("segsleft cannot be more than 1 for an MPI SGL; segsleft = %d\n",
3252 segsleft));
3253
3254 /*
3255 * There is one more segment left to add for the MPI SGL and there is
3256 * enough room in the frame to add it. This is the normal case because
3257 * MPI SGL's don't have chains, otherwise something is wrong.
3258 *
3259 * If this is a bi-directional request, need to account for that
3260 * here. Save the pre-filled sge values. These will be used
3261 * either for the 2nd SGL or for a single direction SGL. If
3262 * cm_out_len is non-zero, this is a bi-directional request, so
3263 * fill in the OUT SGL first, then the IN SGL, otherwise just
3264 * fill in the IN SGL. Note that at this time, when filling in
3265 * 2 SGL's for a bi-directional request, they both use the same
3266 * DMA buffer (same cm command).
3267 */
3268 saved_buf_len = sge->FlagsLength & 0x00FFFFFF;
3269 saved_address_low = sge->Address.Low;
3270 saved_address_high = sge->Address.High;
3271 if (cm->cm_out_len) {
3272 sge->FlagsLength = cm->cm_out_len |
3273 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
3274 MPI2_SGE_FLAGS_END_OF_BUFFER |
3275 MPI2_SGE_FLAGS_HOST_TO_IOC |
3276 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
3277 MPI2_SGE_FLAGS_SHIFT);
3278 cm->cm_sglsize -= len;
3279 /* Endian Safe code */
3280 sge_flags = sge->FlagsLength;
3281 sge->FlagsLength = htole32(sge_flags);
3282 sge->Address.High = htole32(sge->Address.High);
3283 sge->Address.Low = htole32(sge->Address.Low);
3284 bcopy(sge, cm->cm_sge, len);
3285 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
3286 }
3287 sge->FlagsLength = saved_buf_len |
3288 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
3289 MPI2_SGE_FLAGS_END_OF_BUFFER |
3290 MPI2_SGE_FLAGS_LAST_ELEMENT |
3291 MPI2_SGE_FLAGS_END_OF_LIST |
3292 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
3293 MPI2_SGE_FLAGS_SHIFT);
3294 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) {
3295 sge->FlagsLength |=
3296 ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
3297 MPI2_SGE_FLAGS_SHIFT);
3298 } else {
3299 sge->FlagsLength |=
3300 ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
3301 MPI2_SGE_FLAGS_SHIFT);
3302 }
3303 sge->Address.Low = saved_address_low;
3304 sge->Address.High = saved_address_high;
3305
3306 cm->cm_sglsize -= len;
3307 /* Endian Safe code */
3308 sge_flags = sge->FlagsLength;
3309 sge->FlagsLength = htole32(sge_flags);
3310 sge->Address.High = htole32(sge->Address.High);
3311 sge->Address.Low = htole32(sge->Address.Low);
3312 bcopy(sge, cm->cm_sge, len);
3313 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
3314 return (0);
3315 }
3316
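/*
 * Illustrative sketch (editor's note, not driver code): an MPI SGE packs
 * the buffer length into the low 24 bits of FlagsLength and the flags into
 * the high 8 bits (MPI2_SGE_FLAGS_SHIFT is 24), which is why the function
 * above masks with 0x00FFFFFF to recover the saved length.
 */
#if 0
	MPI2_SGE_SIMPLE64 sge;
	uint32_t len = 0x1000;		/* hypothetical 4KB transfer */

	sge.FlagsLength = len |
	    ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << MPI2_SGE_FLAGS_SHIFT);
	/* Length is FlagsLength & 0x00FFFFFF; flags are FlagsLength >> 24. */
#endif
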
3317 /*
3318 * Add one IEEE scatter-gather element (chain or simple) to the IEEE scatter-
3319 * gather list for a command. Maintain cm_sglsize and cm_sge as the
3320 * remaining size and pointer to the next SGE to fill in, respectively.
3321 */
3322 int
3323 mpr_push_ieee_sge(struct mpr_command *cm, void *sgep, int segsleft)
3324 {
3325 MPI2_IEEE_SGE_SIMPLE64 *sge = sgep;
3326 int error, ieee_sge_size = sizeof(MPI25_SGE_IO_UNION);
3327 uint32_t saved_buf_len, saved_address_low, saved_address_high;
3328 uint32_t sge_length;
3329
3330 /*
3331 * case 1: No room for chain or segment (error).
3332 * case 2: Two or more segments left but only room for chain.
3333 * case 3: Last segment and room for it, so set flags.
3334 */
3335
3336 /*
3337 * There should be room for at least one element, or there is a big
3338 * problem.
3339 */
3340 if (cm->cm_sglsize < ieee_sge_size)
3341 panic("MPR: Need SGE Error Code\n");
3342
3343 if ((segsleft >= 2) && (cm->cm_sglsize < (ieee_sge_size * 2))) {
3344 if ((error = mpr_add_chain(cm, segsleft)) != 0)
3345 return (error);
3346 }
3347
3348 if (segsleft == 1) {
3349 /*
3350 * If this is a bi-directional request, we need to account for that
3351 * here. Save the pre-filled sge values. These will be used
3352 * either for the 2nd SGL or for a single direction SGL. If
3353 * cm_out_len is non-zero, this is a bi-directional request, so
3354 * fill in the OUT SGL first, then the IN SGL, otherwise just
3355 * fill in the IN SGL. Note that at this time, when filling in
3356 * two SGLs for a bi-directional request, they both use the same
3357 * DMA buffer (same cm command).
3358 */
3359 saved_buf_len = sge->Length;
3360 saved_address_low = sge->Address.Low;
3361 saved_address_high = sge->Address.High;
3362 if (cm->cm_out_len) {
3363 sge->Length = cm->cm_out_len;
3364 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
3365 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
3366 cm->cm_sglsize -= ieee_sge_size;
3367 /* Endian Safe code */
3368 sge_length = sge->Length;
3369 sge->Length = htole32(sge_length);
3370 sge->Address.High = htole32(sge->Address.High);
3371 sge->Address.Low = htole32(sge->Address.Low);
3372 bcopy(sgep, cm->cm_sge, ieee_sge_size);
3373 cm->cm_sge =
3374 (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge +
3375 ieee_sge_size);
3376 }
3377 sge->Length = saved_buf_len;
3378 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
3379 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
3380 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
3381 sge->Address.Low = saved_address_low;
3382 sge->Address.High = saved_address_high;
3383 }
3384
3385 cm->cm_sglsize -= ieee_sge_size;
3386 /* Endian Safe code */
3387 sge_length = sge->Length;
3388 sge->Length = htole32(sge_length);
3389 sge->Address.High = htole32(sge->Address.High);
3390 sge->Address.Low = htole32(sge->Address.Low);
3391 bcopy(sgep, cm->cm_sge, ieee_sge_size);
3392 cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge +
3393 ieee_sge_size);
3394 return (0);
3395 }
3396
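/*
 * Illustrative sketch (editor's note, not driver code): unlike the packed
 * MPI SGE, an IEEE SGE carries a full 32-bit Length and a separate Flags
 * byte, so no shifting or masking is needed when filling one in.
 */
#if 0
	MPI2_IEEE_SGE_SIMPLE64 sge;

	sge.Length = 0x1000;		/* hypothetical 4KB transfer */
	sge.Flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
#endif
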
3397 /*
3398 * Add one dma segment to the scatter-gather list for a command.
3399 */
3400 int
3401 mpr_add_dmaseg(struct mpr_command *cm, vm_paddr_t pa, size_t len, u_int flags,
3402 int segsleft)
3403 {
3404 MPI2_SGE_SIMPLE64 sge;
3405 MPI2_IEEE_SGE_SIMPLE64 ieee_sge;
3406
3407 if (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) {
3408 ieee_sge.Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
3409 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
3410 ieee_sge.Length = len;
3411 mpr_from_u64(pa, &ieee_sge.Address);
3412
3413 return (mpr_push_ieee_sge(cm, &ieee_sge, segsleft));
3414 } else {
3415 /*
3416 * This driver always uses 64-bit address elements for
3417 * simplicity.
3418 */
3419 flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
3420 MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
3421 /* Endianness is handled by the htole32() calls in mpr_push_sge */
3422 sge.FlagsLength = len | (flags << MPI2_SGE_FLAGS_SHIFT);
3423 mpr_from_u64(pa, &sge.Address);
3424
3425 return (mpr_push_sge(cm, &sge, sizeof sge, segsleft));
3426 }
3427 }
3428
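/*
 * Illustrative sketch (editor's note, not driver code): the SGL format is
 * chosen per command, so even the zero-length terminating element that
 * mpr_map_command() below adds for data-less requests goes through this
 * same dispatch.
 */
#if 0
	mpr_add_dmaseg(cm, 0, 0, 0, 1);	/* single zero-length element */
#endif
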
3429 static void
3430 mpr_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3431 {
3432 struct mpr_softc *sc;
3433 struct mpr_command *cm;
3434 u_int i, dir, sflags;
3435
3436 cm = (struct mpr_command *)arg;
3437 sc = cm->cm_sc;
3438
3439 /*
3440 * In this case, just print out a warning and let the chip tell the
3441 * user they did the wrong thing.
3442 */
3443 if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) {
3444 mpr_dprint(sc, MPR_ERROR, "%s: warning: busdma returned %d "
3445 "segments, more than the %d allowed\n", __func__, nsegs,
3446 cm->cm_max_segs);
3447 }
3448
3449 /*
3450 * Set up DMA direction flags. Bi-directional requests are also handled
3451 * here. In that case, both direction flags will be set.
3452 */
3453 sflags = 0;
3454 if (cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) {
3455 /*
3456 * We have to add a special case for SMP passthrough; there
3457 * is no easy way to generically handle it. The first
3458 * S/G element is used for the command (therefore the
3459 * direction bit needs to be set). The second one is used
3460 * for the reply. We'll leave it to the caller to make
3461 * sure we only have two buffers.
3462 */
3463 /*
3464 * Even though the busdma man page says it doesn't make
3465 * sense to have both direction flags, it does in this case.
3466 * We have one s/g element being accessed in each direction.
3467 */
3468 dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD;
3469
3470 /*
3471 * Set the direction flag on the first buffer in the SMP
3472 * passthrough request. We'll clear it for the second one.
3473 */
3474 sflags |= MPI2_SGE_FLAGS_DIRECTION |
3475 MPI2_SGE_FLAGS_END_OF_BUFFER;
3476 } else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) {
3477 sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
3478 dir = BUS_DMASYNC_PREWRITE;
3479 } else
3480 dir = BUS_DMASYNC_PREREAD;
3481
3482 /* Check if a native SG list is needed for an NVMe PCIe device. */
3483 if (cm->cm_targ && cm->cm_targ->is_nvme &&
3484 mpr_check_pcie_native_sgl(sc, cm, segs, nsegs) == 0) {
3485 /* A native SG list was built, skip to end. */
3486 goto out;
3487 }
3488
3489 for (i = 0; i < nsegs; i++) {
3490 if ((cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) && (i != 0)) {
3491 sflags &= ~MPI2_SGE_FLAGS_DIRECTION;
3492 }
3493 error = mpr_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len,
3494 sflags, nsegs - i);
3495 if (error != 0) {
3496 /* Resource shortage, roll back! */
3497 if (ratecheck(&sc->lastfail, &mpr_chainfail_interval))
3498 mpr_dprint(sc, MPR_INFO, "Out of chain frames, "
3499 "consider increasing hw.mpr.max_chains.\n");
3500 cm->cm_flags |= MPR_CM_FLAGS_CHAIN_FAILED;
3501 mpr_complete_command(sc, cm);
3502 return;
3503 }
3504 }
3505
3506 out:
3507 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
3508 mpr_enqueue_request(sc, cm);
3509
3510 return;
3511 }
3512
3513 static void
3514 mpr_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize,
3515 int error)
3516 {
3517 mpr_data_cb(arg, segs, nsegs, error);
3518 }
3519
3520 /*
3521 * This is the routine to enqueue commands asynchronously.
3522 * Note that the only error path here is from bus_dmamap_load(), which can
3523 * return EINPROGRESS if it is waiting for resources. Other than this, it's
3524 * assumed that if you have a command in-hand, then you have enough credits
3525 * to use it.
3526 */
3527 int
3528 mpr_map_command(struct mpr_softc *sc, struct mpr_command *cm)
3529 {
3530 int error = 0;
3531
3532 if (cm->cm_flags & MPR_CM_FLAGS_USE_UIO) {
3533 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap,
3534 &cm->cm_uio, mpr_data_cb2, cm, 0);
3535 } else if (cm->cm_flags & MPR_CM_FLAGS_USE_CCB) {
3536 error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap,
3537 cm->cm_data, mpr_data_cb, cm, 0);
3538 } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) {
3539 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap,
3540 cm->cm_data, cm->cm_length, mpr_data_cb, cm, 0);
3541 } else {
3542 /* Add a zero-length element as needed */
3543 if (cm->cm_sge != NULL)
3544 mpr_add_dmaseg(cm, 0, 0, 0, 1);
3545 mpr_enqueue_request(sc, cm);
3546 }
3547
3548 return (error);
3549 }
3550
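/*
 * Illustrative caller sketch (editor's note, not driver code), assuming a
 * command whose cm_data/cm_length are already filled in: EINPROGRESS from
 * busdma means the load was deferred and the callback will still fire, so
 * only other non-zero returns are treated as failures.
 */
#if 0
	error = mpr_map_command(sc, cm);
	if (error != 0 && error != EINPROGRESS)
		mpr_free_command(sc, cm);	/* hypothetical error path */
#endif
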
3551 /*
3552 * This is the routine to enqueue commands synchronously. An error of
3553 * EINPROGRESS from mpr_map_command() is ignored since the command will
3554 * be executed and enqueued automatically. Other errors come from msleep().
3555 */
3556 int
3557 mpr_wait_command(struct mpr_softc *sc, struct mpr_command **cmp, int timeout,
3558 int sleep_flag)
3559 {
3560 int error, rc;
3561 struct timeval cur_time, start_time;
3562 struct mpr_command *cm = *cmp;
3563
3564 if (sc->mpr_flags & MPR_FLAGS_DIAGRESET)
3565 return EBUSY;
3566
3567 cm->cm_complete = NULL;
3568 cm->cm_flags |= (MPR_CM_FLAGS_WAKEUP | MPR_CM_FLAGS_POLLED);
3569 error = mpr_map_command(sc, cm);
3570 if ((error != 0) && (error != EINPROGRESS))
3571 return (error);
3572
3573 // Check for context and wait for 50 mSec at a time until time has
3574 // expired or the command has finished. If msleep can't be used, need
3575 // to poll.
3576 getmicrouptime(&start_time);
3577 if (lockowned(&sc->mpr_lock) && sleep_flag == CAN_SLEEP) {
3578 error = lksleep(cm, &sc->mpr_lock, 0, "mprwait", timeout*hz);
3579 if (error == EWOULDBLOCK) {
3580 /*
3581 * Record the actual elapsed time in the case of a
3582 * timeout for the message below.
3583 */
3584 getmicrouptime(&cur_time);
3585 timevalsub(&cur_time, &start_time);
3586 }
3587 } else {
3588 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
3589 mpr_intr_locked(sc);
3590 if (sleep_flag == CAN_SLEEP)
3591 tsleep(mpr_wait_command, 0, "mprwait", hz < 20 ? 1 : hz / 20);
3592 else
3593 DELAY(50000);
3594
3595 getmicrouptime(&cur_time);
3596 timevalsub(&cur_time, &start_time);
3597 if (cur_time.tv_sec > timeout) {
3598 error = EWOULDBLOCK;
3599 break;
3600 }
3601 }
3602 }
3603
3604 if (error == EWOULDBLOCK) {
3605 mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s, timeout=%d,"
3606 " elapsed=%jd\n", __func__, timeout,
3607 (intmax_t)cur_time.tv_sec);
3608 rc = mpr_reinit(sc);
3609 mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" :
3610 "failed");
3611 if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) {
3612 /*
3613 * Tell the caller that we freed the command in a
3614 * reinit.
3615 */
3616 *cmp = NULL;
3617 }
3618 error = ETIMEDOUT;
3619 }
3620 return (error);
3621 }
3622
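/*
 * Illustrative caller sketch (editor's note, not driver code): on timeout
 * the command may have been freed by the reinit, which the function
 * signals by clearing *cmp, so the pointer must be re-checked before any
 * reuse.
 */
#if 0
	error = mpr_wait_command(sc, &cm, 30, CAN_SLEEP);	/* 30s timeout */
	if (error == ETIMEDOUT && cm == NULL)
		return (error);		/* command was freed during reinit */
#endif
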
3623 /*
3624 * This is the routine to enqueue a command synchronously and poll for
3625 * completion. Its use should be rare.
3626 */
3627 int
3628 mpr_request_polled(struct mpr_softc *sc, struct mpr_command **cmp)
3629 {
3630 int error, rc;
3631 struct timeval cur_time, start_time;
3632 struct mpr_command *cm = *cmp;
3633
3634 error = 0;
3635
3636 cm->cm_flags |= MPR_CM_FLAGS_POLLED;
3637 cm->cm_complete = NULL;
3638 mpr_map_command(sc, cm);
3639
3640 getmicrouptime(&start_time);
3641 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
3642 mpr_intr_locked(sc);
3643
3644 if (lockowned(&sc->mpr_lock))
3645 lksleep(&sc->msleep_fake_chan, &sc->mpr_lock, 0,
3646 "mprpoll", hz < 20 ? 1 : hz / 20);
3647 else
3648 tsleep(mpr_request_polled, 0, "mprpoll", hz < 20 ? 1 : hz / 20);
3649
3650 /*
3651 * Check for real-time timeout and fail if more than 60 seconds.
3652 */
3653 getmicrouptime(&cur_time);
3654 timevalsub(&cur_time, &start_time);
3655 if (cur_time.tv_sec > 60) {
3656 mpr_dprint(sc, MPR_FAULT, "polling failed\n");
3657 error = ETIMEDOUT;
3658 break;
3659 }
3660 }
3661
3662 if (error) {
3663 mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s\n", __func__);
3664 rc = mpr_reinit(sc);
3665 mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" :
3666 "failed");
3667
3668 if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) {
3669 /*
3670 * Tell the caller that we freed the command in a
3671 * reinit.
3672 */
3673 *cmp = NULL;
3674 }
3675 }
3676 return (error);
3677 }
3678
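/*
 * Illustrative caller sketch (editor's note, not driver code): the polled
 * variant is for contexts where interrupts may not yet be running, such as
 * early attach; the same freed-in-reinit check as above applies.
 */
#if 0
	error = mpr_request_polled(sc, &cm);
	if (cm == NULL)
		return (error);		/* command was freed during reinit */
#endif
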
3679 /*
3680 * The MPT driver had a verbose interface for config pages. In this driver,
3681 * reduce it to much simpler terms, similar to the Linux driver.
3682 */
3683 int
3684 mpr_read_config_page(struct mpr_softc *sc, struct mpr_config_params *params)
3685 {
3686 MPI2_CONFIG_REQUEST *req;
3687 struct mpr_command *cm;
3688 int error;
3689
3690 if (sc->mpr_flags & MPR_FLAGS_BUSY) {
3691 return (EBUSY);
3692 }
3693
3694 cm = mpr_alloc_command(sc);
3695 if (cm == NULL) {
3696 return (EBUSY);
3697 }
3698
3699 req = (MPI2_CONFIG_REQUEST *)cm->cm_req;
3700 req->Function = MPI2_FUNCTION_CONFIG;
3701 req->Action = params->action;
3702 req->SGLFlags = 0;
3703 req->ChainOffset = 0;
3704 req->PageAddress = params->page_address;
3705 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
3706 MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;
3707
3708 hdr = &params->hdr.Ext;
3709 req->ExtPageType = hdr->ExtPageType;
3710 req->ExtPageLength = hdr->ExtPageLength;
3711 req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
3712 req->Header.PageLength = 0; /* Must be set to zero */
3713 req->Header.PageNumber = hdr->PageNumber;
3714 req->Header.PageVersion = hdr->PageVersion;
3715 } else {
3716 MPI2_CONFIG_PAGE_HEADER *hdr;
3717
3718 hdr = &params->hdr.Struct;
3719 req->Header.PageType = hdr->PageType;
3720 req->Header.PageNumber = hdr->PageNumber;
3721 req->Header.PageLength = hdr->PageLength;
3722 req->Header.PageVersion = hdr->PageVersion;
3723 }
3724
3725 cm->cm_data = params->buffer;
3726 cm->cm_length = params->length;
3727 if (cm->cm_data != NULL) {
3728 cm->cm_sge = &req->PageBufferSGE;
3729 cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
3730 cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN;
3731 } else
3732 cm->cm_sge = NULL;
3733 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3734
3735 cm->cm_complete_data = params;
3736 if (params->callback != NULL) {
3737 cm->cm_complete = mpr_config_complete;
3738 return (mpr_map_command(sc, cm));
3739 } else {
3740 error = mpr_wait_command(sc, &cm, 0, CAN_SLEEP);
3741 if (error) {
3742 mpr_dprint(sc, MPR_FAULT,
3743 "Error %d reading config page\n", error);
3744 if (cm != NULL)
3745 mpr_free_command(sc, cm);
3746 return (error);
3747 }
3748 mpr_config_complete(sc, cm);
3749 }
3750
3751 return (0);
3752 }
3753
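/*
 * Illustrative caller sketch (editor's note, not driver code), assuming a
 * synchronous read of an ordinary (non-extended) page into a caller-owned
 * buffer; the header in params would normally come from a prior
 * PAGE_HEADER action.  page_buf and page_len are hypothetical.
 */
#if 0
	struct mpr_config_params params;

	bzero(&params, sizeof(params));
	params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.page_address = 0;
	params.buffer = page_buf;
	params.length = page_len;
	params.callback = NULL;		/* NULL means wait synchronously */
	error = mpr_read_config_page(sc, &params);
#endif
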
3754 int
3755 mpr_write_config_page(struct mpr_softc *sc, struct mpr_config_params *params)
3756 {
3757 return (EINVAL);
3758 }
3759
3760 static void
3761 mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm)
3762 {
3763 MPI2_CONFIG_REPLY *reply;
3764 struct mpr_config_params *params;
3765
3766 MPR_FUNCTRACE(sc);
3767 params = cm->cm_complete_data;
3768
3769 if (cm->cm_data != NULL) {
3770 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
3771 BUS_DMASYNC_POSTREAD);
3772 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
3773 }
3774
3775 /*
3776 * XXX KDM need to do more error recovery? This results in the
3777 * device in question not getting probed.
3778 */
3779 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3780 params->status = MPI2_IOCSTATUS_BUSY;
3781 goto done;
3782 }
3783
3784 reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
3785 if (reply == NULL) {
3786 params->status = MPI2_IOCSTATUS_BUSY;
3787 goto done;
3788 }
3789 params->status = reply->IOCStatus;
3790 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
3791 params->hdr.Ext.ExtPageType = reply->ExtPageType;
3792 params->hdr.Ext.ExtPageLength = reply->ExtPageLength;
3793 params->hdr.Ext.PageType = reply->Header.PageType;
3794 params->hdr.Ext.PageNumber = reply->Header.PageNumber;
3795 params->hdr.Ext.PageVersion = reply->Header.PageVersion;
3796 } else {
3797 params->hdr.Struct.PageType = reply->Header.PageType;
3798 params->hdr.Struct.PageNumber = reply->Header.PageNumber;
3799 params->hdr.Struct.PageLength = reply->Header.PageLength;
3800 params->hdr.Struct.PageVersion = reply->Header.PageVersion;
3801 }
3802
3803 done:
3804 mpr_free_command(sc, cm);
3805 if (params->callback != NULL)
3806 params->callback(sc, params);
3807
3808 return;
3809 }
3810