xref: /freebsd/sys/powerpc/pseries/phyp_vscsi.c (revision 315ee00f)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 2013 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bus_dma.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/vmem.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_sim.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <powerpc/pseries/phyp-hvcall.h>

struct vscsi_softc;

/* VSCSI CRQ format from table 260 of PAPR spec 2.4 (page 760) */
struct vscsi_crq {
	uint8_t valid;
	uint8_t format;
	uint8_t reserved;
	uint8_t status;
	uint16_t timeout;
	uint16_t iu_length;
	uint64_t iu_data;
};
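
/*
 * CRQ 'valid' and 'format' values as used by the code below: valid 0xc0
 * marks a transport event (the initialization handshake), valid 0x80 a
 * command or response entry.  For 0x80 entries, format 0x01 carries an
 * SRP IU and format 0x02 a management datagram (MAD).
 */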

struct vscsi_xfer {
	TAILQ_ENTRY(vscsi_xfer) queue;
	struct vscsi_softc *sc;
	union ccb *ccb;
	bus_dmamap_t dmamap;
	uint64_t tag;

	vmem_addr_t srp_iu_offset;
	vmem_size_t srp_iu_size;
};

TAILQ_HEAD(vscsi_xferq, vscsi_xfer);

struct vscsi_softc {
	device_t	dev;
	struct cam_devq *devq;
	struct cam_sim	*sim;
	struct cam_path	*path;
	struct mtx io_lock;

	cell_t		unit;
	int		bus_initialized;
	int		bus_logged_in;
	int		max_transactions;

	int		irqid;
	struct resource	*irq;
	void		*irq_cookie;

	bus_dma_tag_t	crq_tag;
	struct vscsi_crq *crq_queue;
	int		n_crqs, cur_crq;
	bus_dmamap_t	crq_map;
	bus_addr_t	crq_phys;

	vmem_t		*srp_iu_arena;
	void		*srp_iu_queue;
	bus_addr_t	srp_iu_phys;

	bus_dma_tag_t	data_tag;

	struct vscsi_xfer loginxp;
	struct vscsi_xfer *xfer;
	struct vscsi_xferq active_xferq;
	struct vscsi_xferq free_xferq;
};

struct srp_login {
	uint8_t type;
	uint8_t reserved[7];
	uint64_t tag;
	uint64_t max_cmd_length;
	uint32_t reserved2;
	uint16_t buffer_formats;
	uint8_t flags;
	uint8_t reserved3[5];
	uint8_t initiator_port_id[16];
	uint8_t target_port_id[16];
} __packed;

struct srp_login_rsp {
	uint8_t type;
	uint8_t reserved[3];
	uint32_t request_limit_delta;
	uint8_t tag;
	uint32_t max_i_to_t_len;
	uint32_t max_t_to_i_len;
	uint16_t buffer_formats;
	uint8_t flags;
	/* Some reserved bits follow */
} __packed;

struct srp_cmd {
	uint8_t type;
	uint8_t flags1;
	uint8_t reserved[3];
	uint8_t formats;
	uint8_t out_buffer_count;
	uint8_t in_buffer_count;
	uint64_t tag;
	uint32_t reserved2;
	uint64_t lun;
	uint8_t reserved3[3];
	uint8_t additional_cdb;
	uint8_t cdb[16];
	uint8_t data_payload[0];
} __packed;

struct srp_rsp {
	uint8_t type;
	uint8_t reserved[3];
	uint32_t request_limit_delta;
	uint64_t tag;
	uint16_t reserved2;
	uint8_t flags;
	uint8_t status;
	uint32_t data_out_resid;
	uint32_t data_in_resid;
	uint32_t sense_data_len;
	uint32_t response_data_len;
	uint8_t data_payload[0];
} __packed;

struct srp_tsk_mgmt {
	uint8_t type;
	uint8_t reserved[7];
	uint64_t tag;
	uint32_t reserved2;
	uint64_t lun;
	uint8_t reserved3[2];
	uint8_t function;
	uint8_t reserved4;
	uint64_t manage_tag;
	uint64_t reserved5;
} __packed;

/* Message code type */
#define SRP_LOGIN_REQ	0x00
#define SRP_TSK_MGMT	0x01
#define SRP_CMD		0x02
#define SRP_I_LOGOUT	0x03

#define SRP_LOGIN_RSP	0xC0
#define SRP_RSP		0xC1
#define SRP_LOGIN_REJ	0xC2

#define SRP_T_LOGOUT	0x80
#define SRP_CRED_REQ	0x81
#define SRP_AER_REQ	0x82

#define SRP_CRED_RSP	0x41
#define SRP_AER_RSP	0x42

/* Flags for srp_rsp flags field */
#define SRP_RSPVALID	0x01
#define SRP_SNSVALID	0x02
#define SRP_DOOVER	0x04
#define SRP_DOUNDER	0x08
#define SRP_DIOVER	0x10
#define SRP_DIUNDER	0x20

#define	MAD_SUCCESS			0x00
#define	MAD_NOT_SUPPORTED		0xf1
#define	MAD_FAILED			0xf7

#define	MAD_EMPTY_IU			0x01
#define	MAD_ERROR_LOGGING_REQUEST	0x02
#define	MAD_ADAPTER_INFO_REQUEST	0x03
#define	MAD_CAPABILITIES_EXCHANGE	0x05
#define	MAD_PHYS_ADAP_INFO_REQUEST	0x06
#define	MAD_TAPE_PASSTHROUGH_REQUEST	0x07
#define	MAD_ENABLE_FAST_FAIL		0x08

static int	vscsi_probe(device_t);
static int	vscsi_attach(device_t);
static int	vscsi_detach(device_t);
static void	vscsi_cam_action(struct cam_sim *, union ccb *);
static void	vscsi_cam_poll(struct cam_sim *);
static void	vscsi_intr(void *arg);
static void	vscsi_check_response_queue(struct vscsi_softc *sc);
static void	vscsi_setup_bus(struct vscsi_softc *sc);

static void	vscsi_srp_login(struct vscsi_softc *sc);
static void	vscsi_crq_load_cb(void *, bus_dma_segment_t *, int, int);
static void	vscsi_scsi_command(void *xxp, bus_dma_segment_t *segs,
		    int nsegs, int err);
static void	vscsi_task_management(struct vscsi_softc *sc, union ccb *ccb);
static void	vscsi_srp_response(struct vscsi_xfer *, struct vscsi_crq *);

static device_method_t	vscsi_methods[] = {
	DEVMETHOD(device_probe,		vscsi_probe),
	DEVMETHOD(device_attach,	vscsi_attach),
	DEVMETHOD(device_detach,	vscsi_detach),

	DEVMETHOD_END
};

static driver_t vscsi_driver = {
	"vscsi",
	vscsi_methods,
	sizeof(struct vscsi_softc)
};

DRIVER_MODULE(vscsi, vdevice, vscsi_driver, 0, 0);
MALLOC_DEFINE(M_VSCSI, "vscsi", "CAM device queue for VSCSI");

static int
vscsi_probe(device_t dev)
{

	if (!ofw_bus_is_compatible(dev, "IBM,v-scsi"))
		return (ENXIO);

	device_set_desc(dev, "POWER Hypervisor Virtual SCSI Bus");
	return (0);
}

static int
vscsi_attach(device_t dev)
{
	struct vscsi_softc *sc;
	struct vscsi_xfer *xp;
	int error, i;

	sc = device_get_softc(dev);
	if (sc == NULL)
		return (EINVAL);

	sc->dev = dev;
	mtx_init(&sc->io_lock, "vscsi", NULL, MTX_DEF);

	/* Get properties */
	OF_getencprop(ofw_bus_get_node(dev), "reg", &sc->unit,
	    sizeof(sc->unit));

	/* Setup interrupt */
	sc->irqid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
	    RF_ACTIVE);

	if (!sc->irq) {
		device_printf(dev, "Could not allocate IRQ\n");
		mtx_destroy(&sc->io_lock);
		return (ENXIO);
	}

	bus_setup_intr(dev, sc->irq, INTR_TYPE_CAM | INTR_MPSAFE |
	    INTR_ENTROPY, NULL, vscsi_intr, sc, &sc->irq_cookie);

	/* Data DMA */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
	    256, BUS_SPACE_MAXSIZE_32BIT, 0, busdma_lock_mutex, &sc->io_lock,
	    &sc->data_tag);

	TAILQ_INIT(&sc->active_xferq);
	TAILQ_INIT(&sc->free_xferq);

	/* First XFER for login data */
	sc->loginxp.sc = sc;
	bus_dmamap_create(sc->data_tag, 0, &sc->loginxp.dmamap);
	TAILQ_INSERT_TAIL(&sc->free_xferq, &sc->loginxp, queue);

	/* CRQ area */
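	/*
	 * A single 8-page DMA region backs both queues: the first page
	 * holds the CRQ ring and the remaining pages are carved into the
	 * SRP IU vmem arena by vscsi_crq_load_cb() once the bus address
	 * is known.
	 */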
	error = bus_dma_tag_create(bus_get_dma_tag(dev), PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 8*PAGE_SIZE,
	    1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->crq_tag);
	error = bus_dmamem_alloc(sc->crq_tag, (void **)&sc->crq_queue,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->crq_map);
	sc->crq_phys = 0;
	sc->n_crqs = 0;
	error = bus_dmamap_load(sc->crq_tag, sc->crq_map, sc->crq_queue,
	    8*PAGE_SIZE, vscsi_crq_load_cb, sc, 0);

	mtx_lock(&sc->io_lock);
	vscsi_setup_bus(sc);
	sc->xfer = malloc(sizeof(sc->xfer[0])*sc->max_transactions, M_VSCSI,
	    M_NOWAIT);
	for (i = 0; i < sc->max_transactions; i++) {
		xp = &sc->xfer[i];
		xp->sc = sc;

		error = bus_dmamap_create(sc->data_tag, 0, &xp->dmamap);
		if (error) {
			device_printf(dev, "Could not create DMA map (%d)\n",
			    error);
			break;
		}

		TAILQ_INSERT_TAIL(&sc->free_xferq, xp, queue);
	}
	mtx_unlock(&sc->io_lock);

	/* Allocate CAM bits */
	if ((sc->devq = cam_simq_alloc(sc->max_transactions)) == NULL)
		return (ENOMEM);

	sc->sim = cam_sim_alloc(vscsi_cam_action, vscsi_cam_poll, "vscsi", sc,
				device_get_unit(dev), &sc->io_lock,
				sc->max_transactions, sc->max_transactions,
				sc->devq);
	if (sc->sim == NULL) {
		cam_simq_free(sc->devq);
		sc->devq = NULL;
		device_printf(dev, "CAM SIM attach failed\n");
		return (EINVAL);
	}

	mtx_lock(&sc->io_lock);
	if (xpt_bus_register(sc->sim, dev, 0) != 0) {
		device_printf(dev, "XPT bus registration failed\n");
		cam_sim_free(sc->sim, FALSE);
		sc->sim = NULL;
		cam_simq_free(sc->devq);
		sc->devq = NULL;
		mtx_unlock(&sc->io_lock);
		return (EINVAL);
	}
	mtx_unlock(&sc->io_lock);

	return (0);
}

static int
vscsi_detach(device_t dev)
{
	struct vscsi_softc *sc;

	sc = device_get_softc(dev);
	if (sc == NULL)
		return (EINVAL);

	if (sc->sim != NULL) {
		mtx_lock(&sc->io_lock);
		xpt_bus_deregister(cam_sim_path(sc->sim));
		cam_sim_free(sc->sim, FALSE);
		sc->sim = NULL;
		mtx_unlock(&sc->io_lock);
	}

	if (sc->devq != NULL) {
		cam_simq_free(sc->devq);
		sc->devq = NULL;
	}

	mtx_destroy(&sc->io_lock);

	return (0);
}

static void
vscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct vscsi_softc *sc = cam_sim_softc(sim);

	mtx_assert(&sc->io_lock, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_TAG_ABLE;
		cpi->hba_misc = PIM_EXTLUNS;
		cpi->target_sprt = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		cpi->max_lun = 0;
		cpi->initiator_id = ~0;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "IBM", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SRP;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC4;
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_RESET_BUS:
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_RESET_DEV:
		ccb->ccb_h.status = CAM_REQ_INPROG;
		vscsi_task_management(sc, ccb);
		return;
	case XPT_GET_TRAN_SETTINGS:
		ccb->cts.protocol = PROTO_SCSI;
		ccb->cts.protocol_version = SCSI_REV_SPC4;
		ccb->cts.transport = XPORT_SRP;
		ccb->cts.transport_version = 0;
		ccb->cts.proto_specific.valid = 0;
		ccb->cts.xport_specific.valid = 0;
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	case XPT_SCSI_IO:
	{
		struct vscsi_xfer *xp;

		ccb->ccb_h.status = CAM_REQ_INPROG;

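		/*
		 * The devq and SIM are sized to max_transactions, so CAM
		 * should never queue more commands than we have free xfer
		 * slots; running dry here indicates a bug rather than
		 * ordinary back-pressure.
		 */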
		xp = TAILQ_FIRST(&sc->free_xferq);
		if (xp == NULL)
			panic("SCSI queue flooded");
		xp->ccb = ccb;
		TAILQ_REMOVE(&sc->free_xferq, xp, queue);
		TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
		bus_dmamap_load_ccb(sc->data_tag, xp->dmamap,
		    ccb, vscsi_scsi_command, xp, 0);

		return;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}

	xpt_done(ccb);
	return;
}

static void
vscsi_srp_login(struct vscsi_softc *sc)
{
	struct vscsi_xfer *xp;
	struct srp_login *login;
	struct vscsi_crq crq;
	int err;

	mtx_assert(&sc->io_lock, MA_OWNED);

	xp = TAILQ_FIRST(&sc->free_xferq);
	if (xp == NULL)
		panic("SCSI queue flooded");
	xp->ccb = NULL;
	TAILQ_REMOVE(&sc->free_xferq, xp, queue);
	TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);

	/* Set up command */
	xp->srp_iu_size = 64;
	crq.iu_length = htobe16(xp->srp_iu_size);
	err = vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
	    M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
	if (err)
		panic("Error during VMEM allocation (%d)", err);

	login = (struct srp_login *)((uint8_t *)xp->sc->srp_iu_queue +
	    (uintptr_t)xp->srp_iu_offset);
	bzero(login, xp->srp_iu_size);
	login->type = SRP_LOGIN_REQ;
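	/* The tag is our xfer pointer; the response hands it back to us. */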
	login->tag = (uint64_t)(xp);
	login->max_cmd_length = htobe64(256);
	login->buffer_formats = htobe16(0x1 | 0x2); /* Direct and indirect */
	login->flags = 0;

	/* Create CRQ entry */
	crq.valid = 0x80;
	crq.format = 0x01;
	crq.iu_data = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset);
	bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);

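	/*
	 * H_SEND_CRQ takes the 16-byte CRQ entry as two register-sized
	 * words.  The entry was assembled big-endian above, so be64toh()
	 * preserves its byte layout regardless of host endianness.
	 */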
	err = phyp_hcall(H_SEND_CRQ, xp->sc->unit,
	    be64toh(((uint64_t *)(&crq))[0]),
	    be64toh(((uint64_t *)(&crq))[1]));
	if (err != 0)
		panic("CRQ send failure (%d)", err);
}

static void
vscsi_task_management(struct vscsi_softc *sc, union ccb *ccb)
{
	struct srp_tsk_mgmt *cmd;
	struct vscsi_xfer *xp;
	struct vscsi_crq crq;
	int err;

	mtx_assert(&sc->io_lock, MA_OWNED);

	xp = TAILQ_FIRST(&sc->free_xferq);
	if (xp == NULL)
		panic("SCSI queue flooded");
	xp->ccb = ccb;
	TAILQ_REMOVE(&sc->free_xferq, xp, queue);
	TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);

	xp->srp_iu_size = sizeof(*cmd);
	crq.iu_length = htobe16(xp->srp_iu_size);
	err = vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
	    M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
	if (err)
		panic("Error during VMEM allocation (%d)", err);

	cmd = (struct srp_tsk_mgmt *)((uint8_t *)xp->sc->srp_iu_queue +
	    (uintptr_t)xp->srp_iu_offset);
	bzero(cmd, xp->srp_iu_size);
	cmd->type = SRP_TSK_MGMT;
	cmd->tag = (uint64_t)xp;
	cmd->lun = htobe64(CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));

	switch (ccb->ccb_h.func_code) {
	case XPT_RESET_DEV:
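		/* SRP task management function 0x08: LOGICAL UNIT RESET */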
		cmd->function = 0x08;
		break;
	default:
		panic("Unimplemented code %d", ccb->ccb_h.func_code);
		break;
	}

	bus_dmamap_sync(xp->sc->crq_tag, xp->sc->crq_map, BUS_DMASYNC_PREWRITE);

	/* Create CRQ entry */
	crq.valid = 0x80;
	crq.format = 0x01;
	crq.iu_data = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset);

	err = phyp_hcall(H_SEND_CRQ, xp->sc->unit,
	    be64toh(((uint64_t *)(&crq))[0]),
	    be64toh(((uint64_t *)(&crq))[1]));
	if (err != 0)
		panic("CRQ send failure (%d)", err);
}

static void
vscsi_scsi_command(void *xxp, bus_dma_segment_t *segs, int nsegs, int err)
{
	struct vscsi_xfer *xp = xxp;
	uint8_t *cdb;
	union ccb *ccb = xp->ccb;
	struct srp_cmd *cmd;
	uint64_t chunk_addr;
	uint32_t chunk_size;
	int desc_start, i;
	struct vscsi_crq crq;

	KASSERT(err == 0, ("DMA error %d\n", err));

	mtx_assert(&xp->sc->io_lock, MA_OWNED);

	cdb = (ccb->ccb_h.flags & CAM_CDB_POINTER) ?
	    ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes;

	/* Command format from Table 20, page 37 of SRP spec */
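	/*
	 * 48 fixed SRP_CMD bytes, plus a 16-byte direct descriptor or a
	 * 20-byte indirect table descriptor, plus any CDB bytes beyond the
	 * first 16.  The per-segment list for the indirect case is added
	 * below, after the advertised IU length has been recorded.
	 */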
	xp->srp_iu_size = 48 + ((nsegs > 1) ? 20 : 16) +
	    ((ccb->csio.cdb_len > 16) ? (ccb->csio.cdb_len - 16) : 0);
	crq.iu_length = htobe16(xp->srp_iu_size);
	if (nsegs > 1)
		xp->srp_iu_size += nsegs*16;
	xp->srp_iu_size = roundup(xp->srp_iu_size, 16);
	err = vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
	    M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
	if (err)
		panic("Error during VMEM allocation (%d)", err);

	cmd = (struct srp_cmd *)((uint8_t *)xp->sc->srp_iu_queue +
	    (uintptr_t)xp->srp_iu_offset);
	bzero(cmd, xp->srp_iu_size);
	cmd->type = SRP_CMD;
	if (ccb->csio.cdb_len > 16)
		cmd->additional_cdb = (ccb->csio.cdb_len - 16) << 2;
	memcpy(cmd->cdb, cdb, ccb->csio.cdb_len);

	cmd->tag = (uint64_t)(xp); /* Let the responder find this again */
	cmd->lun = htobe64(CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));

	if (nsegs > 1) {
		/* Use indirect descriptors */
		switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
		case CAM_DIR_OUT:
			cmd->formats = (2 << 4);
			break;
		case CAM_DIR_IN:
			cmd->formats = 2;
			break;
		default:
			panic("Does not support bidirectional commands (%d)",
			    ccb->ccb_h.flags & CAM_DIR_MASK);
			break;
		}

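		/*
		 * Indirect table descriptor: 8-byte table address, 4-byte
		 * handle, 4-byte table length, then a 4-byte total transfer
		 * length.  The table address points just past this header,
		 * back into the IU allocation, where the per-segment
		 * descriptors are written below.
		 */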
		desc_start = ((ccb->csio.cdb_len > 16) ?
		    ccb->csio.cdb_len - 16 : 0);
		chunk_addr = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset +
		    20 + desc_start + sizeof(*cmd));
		chunk_size = htobe32(16*nsegs);
		memcpy(&cmd->data_payload[desc_start], &chunk_addr, 8);
		memcpy(&cmd->data_payload[desc_start+12], &chunk_size, 4);
		chunk_size = 0;
		for (i = 0; i < nsegs; i++)
			chunk_size += segs[i].ds_len;
		chunk_size = htobe32(chunk_size);
		memcpy(&cmd->data_payload[desc_start+16], &chunk_size, 4);
		desc_start += 20;
		for (i = 0; i < nsegs; i++) {
			chunk_addr = htobe64(segs[i].ds_addr);
			chunk_size = htobe32(segs[i].ds_len);

			memcpy(&cmd->data_payload[desc_start + 16*i],
			    &chunk_addr, 8);
			/* Set handle tag to 0 */
			memcpy(&cmd->data_payload[desc_start + 16*i + 12],
			    &chunk_size, 4);
		}
	} else if (nsegs == 1) {
		switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
		case CAM_DIR_OUT:
			cmd->formats = (1 << 4);
			break;
		case CAM_DIR_IN:
			cmd->formats = 1;
			break;
		default:
			panic("Does not support bidirectional commands (%d)",
			    ccb->ccb_h.flags & CAM_DIR_MASK);
			break;
		}

		/*
		 * Memory descriptor:
		 * 8 byte address
		 * 4 byte handle
		 * 4 byte length
		 */

		chunk_addr = htobe64(segs[0].ds_addr);
		chunk_size = htobe32(segs[0].ds_len);
		desc_start = ((ccb->csio.cdb_len > 16) ?
		    ccb->csio.cdb_len - 16 : 0);

		memcpy(&cmd->data_payload[desc_start], &chunk_addr, 8);
		/* Set handle tag to 0 */
		memcpy(&cmd->data_payload[desc_start+12], &chunk_size, 4);
		KASSERT(xp->srp_iu_size >= 48 + ((ccb->csio.cdb_len > 16) ?
		    ccb->csio.cdb_len : 16), ("SRP IU command length"));
	} else {
		cmd->formats = 0;
	}
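	/*
	 * crq_map covers the whole shared region, IUs included, so this
	 * flushes the command we just built before the server reads it.
	 */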
	bus_dmamap_sync(xp->sc->crq_tag, xp->sc->crq_map, BUS_DMASYNC_PREWRITE);

	/* Create CRQ entry */
	crq.valid = 0x80;
	crq.format = 0x01;
	crq.iu_data = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset);

	err = phyp_hcall(H_SEND_CRQ, xp->sc->unit,
	    be64toh(((uint64_t *)(&crq))[0]),
	    be64toh(((uint64_t *)(&crq))[1]));
	if (err != 0)
		panic("CRQ send failure (%d)", err);
}

static void
vscsi_crq_load_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int err)
{
	struct vscsi_softc *sc = xsc;

	sc->crq_phys = segs[0].ds_addr;
	sc->n_crqs = PAGE_SIZE/sizeof(struct vscsi_crq);

	sc->srp_iu_queue = (uint8_t *)(sc->crq_queue);
	sc->srp_iu_phys = segs[0].ds_addr;
	sc->srp_iu_arena = vmem_create("VSCSI SRP IU", PAGE_SIZE,
	    segs[0].ds_len - PAGE_SIZE, 16, 0, M_BESTFIT | M_NOWAIT);
}

static void
vscsi_setup_bus(struct vscsi_softc *sc)
{
	struct vscsi_crq crq;
	struct vscsi_xfer *xp;
	int error;

	struct {
		uint32_t type;
		uint16_t status;
		uint16_t length;
		uint64_t tag;
		uint64_t buffer;
		struct {
			char srp_version[8];
			char partition_name[96];
			uint32_t partition_number;
			uint32_t mad_version;
			uint32_t os_type;
			uint32_t port_max_txu[8];
		} payload;
	} mad_adapter_info;

	bzero(&crq, sizeof(crq));

	/* Init message */
	crq.valid = 0xc0;
	crq.format = 0x01;

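	/*
	 * Clear any CRQ left over from a previous registration; the
	 * hypervisor returns H_BUSY while the teardown is still in flight.
	 */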
	do {
		error = phyp_hcall(H_FREE_CRQ, sc->unit);
	} while (error == H_BUSY);

	/* See initialization sequence page 757 */
	bzero(sc->crq_queue, sc->n_crqs*sizeof(sc->crq_queue[0]));
	sc->cur_crq = 0;
	sc->bus_initialized = 0;
	sc->bus_logged_in = 0;
	bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
	error = phyp_hcall(H_REG_CRQ, sc->unit, sc->crq_phys,
	    sc->n_crqs*sizeof(sc->crq_queue[0]));
	KASSERT(error == 0, ("CRQ registration failed (%d)", error));

	error = phyp_hcall(H_SEND_CRQ, sc->unit,
	    be64toh(((uint64_t *)(&crq))[0]),
	    be64toh(((uint64_t *)(&crq))[1]));
	if (error != 0)
		panic("CRQ setup failure (%d)", error);

	while (sc->bus_initialized == 0)
		vscsi_check_response_queue(sc);

	/* Send MAD adapter info */
	mad_adapter_info.type = htobe32(MAD_ADAPTER_INFO_REQUEST);
	mad_adapter_info.status = 0;
	mad_adapter_info.length = htobe16(sizeof(mad_adapter_info.payload));

	strcpy(mad_adapter_info.payload.srp_version, "16.a");
	strcpy(mad_adapter_info.payload.partition_name, "UNKNOWN");
	mad_adapter_info.payload.partition_number = -1;
	mad_adapter_info.payload.mad_version = htobe32(1);
	mad_adapter_info.payload.os_type = htobe32(2); /* Claim we are Linux */
	mad_adapter_info.payload.port_max_txu[0] = 0;
	/* If this fails, we get the defaults above */
	OF_getprop(OF_finddevice("/"), "ibm,partition-name",
	    mad_adapter_info.payload.partition_name,
	    sizeof(mad_adapter_info.payload.partition_name));
	OF_getprop(OF_finddevice("/"), "ibm,partition-no",
	    &mad_adapter_info.payload.partition_number,
	    sizeof(mad_adapter_info.payload.partition_number));

	xp = TAILQ_FIRST(&sc->free_xferq);
	xp->ccb = NULL;
	TAILQ_REMOVE(&sc->free_xferq, xp, queue);
	TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
	xp->srp_iu_size = sizeof(mad_adapter_info);
	crq.iu_length = htobe16(xp->srp_iu_size);
	vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
	    M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
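	/*
	 * 'buffer' points at the payload: type, status, length, tag, and
	 * buffer itself occupy the first 24 bytes of the IU.
	 */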
	mad_adapter_info.buffer = htobe64(xp->sc->srp_iu_phys +
	    xp->srp_iu_offset + 24);
	mad_adapter_info.tag = (uint64_t)xp;
	memcpy((uint8_t *)xp->sc->srp_iu_queue + (uintptr_t)xp->srp_iu_offset,
	    &mad_adapter_info, sizeof(mad_adapter_info));
	crq.valid = 0x80;
	crq.format = 0x02;
	crq.iu_data = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset);
	bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
	phyp_hcall(H_SEND_CRQ, xp->sc->unit,
	    be64toh(((uint64_t *)(&crq))[0]),
	    be64toh(((uint64_t *)(&crq))[1]));

	while (TAILQ_EMPTY(&sc->free_xferq))
		vscsi_check_response_queue(sc);

	/* Send SRP login */
	vscsi_srp_login(sc);
	while (sc->bus_logged_in == 0)
		vscsi_check_response_queue(sc);

	error = phyp_hcall(H_VIO_SIGNAL, sc->unit, 1); /* Enable interrupts */
}

static void
vscsi_intr(void *xsc)
{
	struct vscsi_softc *sc = xsc;

	mtx_lock(&sc->io_lock);
	vscsi_check_response_queue(sc);
	mtx_unlock(&sc->io_lock);
}

static void
vscsi_srp_response(struct vscsi_xfer *xp, struct vscsi_crq *crq)
{
	union ccb *ccb = xp->ccb;
	struct vscsi_softc *sc = xp->sc;
	struct srp_rsp *rsp;
	uint32_t sense_len;

	/* SRP response packet in original request */
	rsp = (struct srp_rsp *)((uint8_t *)sc->srp_iu_queue +
	    (uintptr_t)xp->srp_iu_offset);
	ccb->csio.scsi_status = rsp->status;
	if (ccb->csio.scsi_status == SCSI_STATUS_OK)
		ccb->ccb_h.status = CAM_REQ_CMP;
	else
		ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
#ifdef NOTYET
	/* Collect fast fail codes */
	if (crq->status != 0)
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
#endif

	if (ccb->ccb_h.status != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	if (!(rsp->flags & SRP_RSPVALID))
		rsp->response_data_len = 0;
	if (!(rsp->flags & SRP_SNSVALID))
		rsp->sense_data_len = 0;
	if (!(rsp->flags & (SRP_DOOVER | SRP_DOUNDER)))
		rsp->data_out_resid = 0;
	if (!(rsp->flags & (SRP_DIOVER | SRP_DIUNDER)))
		rsp->data_in_resid = 0;

	if (rsp->flags & SRP_SNSVALID) {
		bzero(&ccb->csio.sense_data, sizeof(struct scsi_sense_data));
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		sense_len = min(be32toh(rsp->sense_data_len),
		    ccb->csio.sense_len);
		memcpy(&ccb->csio.sense_data,
		    &rsp->data_payload[be32toh(rsp->response_data_len)],
		    sense_len);
		ccb->csio.sense_resid = ccb->csio.sense_len -
		    be32toh(rsp->sense_data_len);
	}

	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_OUT:
		ccb->csio.resid = be32toh(rsp->data_out_resid);
		break;
	case CAM_DIR_IN:
		ccb->csio.resid = be32toh(rsp->data_in_resid);
		break;
	}

	bus_dmamap_sync(sc->data_tag, xp->dmamap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->data_tag, xp->dmamap);
	xpt_done(ccb);
	xp->ccb = NULL;
}

static void
vscsi_login_response(struct vscsi_xfer *xp, struct vscsi_crq *crq)
{
	struct vscsi_softc *sc = xp->sc;
	struct srp_login_rsp *rsp;

	/* SRP response packet in original request */
	rsp = (struct srp_login_rsp *)((uint8_t *)sc->srp_iu_queue +
	    (uintptr_t)xp->srp_iu_offset);
	KASSERT(be16toh(rsp->buffer_formats) & 0x3, ("SRP target supports "
	    "neither direct nor indirect buffers"));

	sc->max_transactions = be32toh(rsp->request_limit_delta);
	device_printf(sc->dev, "Queue depth %d commands\n",
	    sc->max_transactions);
	sc->bus_logged_in = 1;
}

static void
vscsi_cam_poll(struct cam_sim *sim)
{
	struct vscsi_softc *sc = cam_sim_softc(sim);

	vscsi_check_response_queue(sc);
}

static void
vscsi_check_response_queue(struct vscsi_softc *sc)
{
	struct vscsi_crq *crq;
	struct vscsi_xfer *xp;
	int code;

	mtx_assert(&sc->io_lock, MA_OWNED);

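	/*
	 * Consume ring entries until we reach one the hypervisor has not
	 * marked valid.  Clearing 'valid' after processing hands the slot
	 * back to the hypervisor.
	 */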
	while (sc->crq_queue[sc->cur_crq].valid != 0) {
		/* The hypercalls at both ends of this are not optimal */
		phyp_hcall(H_VIO_SIGNAL, sc->unit, 0);
		bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_POSTREAD);

		crq = &sc->crq_queue[sc->cur_crq];

		switch (crq->valid) {
		case 0xc0:
			if (crq->format == 0x02)
				sc->bus_initialized = 1;
			break;
		case 0x80:
			/* IU data is set to tag pointer (the XP) */
			xp = (struct vscsi_xfer *)crq->iu_data;

			switch (crq->format) {
			case 0x01:
				code = *((uint8_t *)sc->srp_iu_queue +
				    (uintptr_t)xp->srp_iu_offset);
				switch (code) {
				case SRP_RSP:
					vscsi_srp_response(xp, crq);
					break;
				case SRP_LOGIN_RSP:
					vscsi_login_response(xp, crq);
					break;
				default:
					device_printf(sc->dev, "Unknown SRP "
					    "response code %d\n", code);
					break;
				}
				break;
			case 0x02:
				/* Ignore management datagrams */
				break;
			default:
				panic("Unknown CRQ format %d\n", crq->format);
				break;
			}
			vmem_free(sc->srp_iu_arena, xp->srp_iu_offset,
			    xp->srp_iu_size);
			TAILQ_REMOVE(&sc->active_xferq, xp, queue);
			TAILQ_INSERT_TAIL(&sc->free_xferq, xp, queue);
			break;
		default:
			device_printf(sc->dev,
			    "Unknown CRQ message type %d\n", crq->valid);
			break;
		}

		crq->valid = 0;
		sc->cur_crq = (sc->cur_crq + 1) % sc->n_crqs;

		bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
		phyp_hcall(H_VIO_SIGNAL, sc->unit, 1);
	}
}
1001