xref: /openbsd/sys/arch/sparc64/dev/vdsk.c (revision a454aff3)
/*	$OpenBSD: vdsk.c,v 1.73 2022/04/16 19:19:58 naddy Exp $	*/
/*
 * Copyright (c) 2009, 2011 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <machine/autoconf.h>
#include <machine/hypervisor.h>

#include <uvm/uvm_extern.h>

#include <scsi/scsi_all.h>
#include <scsi/cd.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

#include <sparc64/dev/cbusvar.h>
#include <sparc64/dev/ldcvar.h>
#include <sparc64/dev/viovar.h>

#ifdef VDSK_DEBUG
#define DPRINTF(x)	printf x
#else
#define DPRINTF(x)
#endif

#define VDSK_TX_ENTRIES		32
#define VDSK_RX_ENTRIES		32

struct vd_attr_info {
	struct vio_msg_tag	tag;
	uint8_t			xfer_mode;
	uint8_t			vd_type;
	uint8_t			vd_mtype;
	uint8_t			_reserved1;
	uint32_t		vdisk_block_size;
	uint64_t		operations;
	uint64_t		vdisk_size;
	uint64_t		max_xfer_sz;
	uint64_t		_reserved2[2];
};

#define VD_DISK_TYPE_SLICE	0x01
#define VD_DISK_TYPE_DISK	0x02

#define VD_MEDIA_TYPE_FIXED	0x01
#define VD_MEDIA_TYPE_CD	0x02
#define VD_MEDIA_TYPE_DVD	0x03

/* vDisk version 1.0. */
#define VD_OP_BREAD		0x01
#define VD_OP_BWRITE		0x02
#define VD_OP_FLUSH		0x03
#define VD_OP_GET_WCE		0x04
#define VD_OP_SET_WCE		0x05
#define VD_OP_GET_VTOC		0x06
#define VD_OP_SET_VTOC		0x07
#define VD_OP_GET_DISKGEOM	0x08
#define VD_OP_SET_DISKGEOM	0x09
#define VD_OP_GET_DEVID		0x0b
#define VD_OP_GET_EFI		0x0c
#define VD_OP_SET_EFI		0x0d

/* vDisk version 1.1. */
#define VD_OP_SCSICMD		0x0a
#define VD_OP_RESET		0x0e
#define VD_OP_GET_ACCESS	0x0f
#define VD_OP_SET_ACCESS	0x10
#define VD_OP_GET_CAPACITY	0x11

struct vd_desc {
	struct vio_dring_hdr	hdr;
	uint64_t		req_id;
	uint8_t			operation;
	uint8_t			slice;
	uint16_t		_reserved1;
	uint32_t		status;
	uint64_t		offset;
	uint64_t		size;
	uint32_t		ncookies;
	uint32_t		_reserved2;
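	/* Enough cookies to map a MAXPHYS-sized transfer one page at a time. */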
	struct ldc_cookie	cookie[MAXPHYS / PAGE_SIZE];
};

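/*
 * With VD_SLICE_NONE, request offsets address the whole virtual disk
 * rather than a single slice; this driver always issues whole-disk
 * requests.
 */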
#define VD_SLICE_NONE		0xff

struct vdsk_dring {
	bus_dmamap_t		vd_map;
	bus_dma_segment_t	vd_seg;
	struct vd_desc		*vd_desc;
	int			vd_nentries;
};

struct vdsk_dring *vdsk_dring_alloc(bus_dma_tag_t, int);
void	vdsk_dring_free(bus_dma_tag_t, struct vdsk_dring *);

/*
 * We support vDisk 1.0 and 1.1.
 */
#define VDSK_MAJOR	1
#define VDSK_MINOR	1

struct vdsk_soft_desc {
	int		vsd_map_idx[MAXPHYS / PAGE_SIZE];
	struct scsi_xfer *vsd_xs;
	int		vsd_ncookies;
};

struct vdsk_softc {
	struct device	sc_dv;
	bus_space_tag_t	sc_bustag;
	bus_dma_tag_t	sc_dmatag;

	void		*sc_tx_ih;
	void		*sc_rx_ih;

	struct ldc_conn	sc_lc;

	uint16_t	sc_vio_state;
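	/*
	 * Progress of the VIO handshake, one SND/ACK bit pair per
	 * negotiation step; the session is up once all bits are set.
	 */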
#define VIO_SND_VER_INFO	0x0001
#define VIO_ACK_VER_INFO	0x0002
#define VIO_SND_ATTR_INFO	0x0004
#define VIO_ACK_ATTR_INFO	0x0008
#define VIO_SND_DRING_REG	0x0010
#define VIO_ACK_DRING_REG	0x0020
#define VIO_SND_RDX		0x0040
#define VIO_ACK_RDX		0x0080
#define VIO_ESTABLISHED		0x00ff

	uint16_t	sc_major;
	uint16_t	sc_minor;

	uint32_t	sc_local_sid;
	uint64_t	sc_dring_ident;
	uint64_t	sc_seq_no;

	int		sc_tx_cnt;
	int		sc_tx_prod;
	int		sc_tx_cons;

	struct ldc_map	*sc_lm;
	struct vdsk_dring *sc_vd;
	struct vdsk_soft_desc *sc_vsd;

	struct scsi_iopool sc_iopool;

	uint32_t	sc_vdisk_block_size;
	uint64_t	sc_vdisk_size;
	uint8_t		sc_vd_mtype;
};

int	vdsk_match(struct device *, void *, void *);
void	vdsk_attach(struct device *, struct device *, void *);

const struct cfattach vdsk_ca = {
	sizeof(struct vdsk_softc), vdsk_match, vdsk_attach
};

struct cfdriver vdsk_cd = {
	NULL, "vdsk", DV_DULL
};

void	vdsk_scsi_cmd(struct scsi_xfer *);

const struct scsi_adapter vdsk_switch = {
	vdsk_scsi_cmd, NULL, NULL, NULL, NULL
};

int	vdsk_tx_intr(void *);
int	vdsk_rx_intr(void *);

void	vdsk_rx_data(struct ldc_conn *, struct ldc_pkt *);
void	vdsk_rx_vio_ctrl(struct vdsk_softc *, struct vio_msg *);
void	vdsk_rx_vio_ver_info(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_attr_info(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_dring_reg(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *);
void	vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *);
void	vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *);

void	vdsk_ldc_reset(struct ldc_conn *);
void	vdsk_ldc_start(struct ldc_conn *);

void	vdsk_sendmsg(struct vdsk_softc *, void *, size_t);
void	vdsk_send_ver_info(struct vdsk_softc *, uint16_t, uint16_t);
void	vdsk_send_attr_info(struct vdsk_softc *);
void	vdsk_send_dring_reg(struct vdsk_softc *);
void	vdsk_send_rdx(struct vdsk_softc *);

void	*vdsk_io_get(void *);
void	vdsk_io_put(void *, void *);

int	vdsk_submit_cmd(struct scsi_xfer *);
void	vdsk_complete_cmd(struct scsi_xfer *, int);

void	vdsk_scsi_inq(struct scsi_xfer *);
void	vdsk_scsi_inquiry(struct scsi_xfer *);
void	vdsk_scsi_capacity(struct scsi_xfer *);
void	vdsk_scsi_capacity16(struct scsi_xfer *);
void	vdsk_scsi_done(struct scsi_xfer *, int);

int
vdsk_match(struct device *parent, void *match, void *aux)
{
	struct cbus_attach_args *ca = aux;

	if (strcmp(ca->ca_name, "disk") == 0)
		return (1);

	return (0);
}

void
vdsk_attach(struct device *parent, struct device *self, void *aux)
{
	struct vdsk_softc *sc = (struct vdsk_softc *)self;
	struct cbus_attach_args *ca = aux;
	struct scsibus_attach_args saa;
	struct ldc_conn *lc;
	int err, s;
	int timeout;

	sc->sc_bustag = ca->ca_bustag;
	sc->sc_dmatag = ca->ca_dmatag;

	printf(": ivec 0x%llx, 0x%llx", ca->ca_tx_ino, ca->ca_rx_ino);

	/*
	 * Un-configure queues before registering interrupt handlers,
	 * such that we don't get any stale LDC packets or events.
	 */
	hv_ldc_tx_qconf(ca->ca_id, 0, 0);
	hv_ldc_rx_qconf(ca->ca_id, 0, 0);

	sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_tx_ino,
	    IPL_BIO, 0, vdsk_tx_intr, sc, sc->sc_dv.dv_xname);
	sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_rx_ino,
	    IPL_BIO, 0, vdsk_rx_intr, sc, sc->sc_dv.dv_xname);
	if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) {
		printf(", can't establish interrupt\n");
		return;
	}

	lc = &sc->sc_lc;
	lc->lc_id = ca->ca_id;
	lc->lc_sc = sc;
	lc->lc_reset = vdsk_ldc_reset;
	lc->lc_start = vdsk_ldc_start;
	lc->lc_rx_data = vdsk_rx_data;

	lc->lc_txq = ldc_queue_alloc(sc->sc_dmatag, VDSK_TX_ENTRIES);
	if (lc->lc_txq == NULL) {
		printf(", can't allocate tx queue\n");
		return;
	}

	lc->lc_rxq = ldc_queue_alloc(sc->sc_dmatag, VDSK_RX_ENTRIES);
	if (lc->lc_rxq == NULL) {
		printf(", can't allocate rx queue\n");
		goto free_txqueue;
	}

	sc->sc_lm = ldc_map_alloc(sc->sc_dmatag, 2048);
	if (sc->sc_lm == NULL) {
		printf(", can't allocate LDC mapping table\n");
		goto free_rxqueue;
	}

	err = hv_ldc_set_map_table(lc->lc_id,
	    sc->sc_lm->lm_map->dm_segs[0].ds_addr, sc->sc_lm->lm_nentries);
	if (err != H_EOK) {
		printf("hv_ldc_set_map_table %d\n", err);
		goto free_map;
	}

	sc->sc_vd = vdsk_dring_alloc(sc->sc_dmatag, 32);
	if (sc->sc_vd == NULL) {
		printf(", can't allocate dring\n");
		goto free_map;
	}
	sc->sc_vsd = malloc(32 * sizeof(*sc->sc_vsd), M_DEVBUF, M_NOWAIT);
	if (sc->sc_vsd == NULL) {
		printf(", can't allocate software ring\n");
		goto free_dring;
	}

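	/*
	 * Reserve map table entry 0 for the descriptor ring itself, so
	 * the vDisk server can read and write the descriptors; the
	 * CPR/CPW bits presumably grant copy-in/copy-out access.
	 */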
	sc->sc_lm->lm_slot[0].entry = sc->sc_vd->vd_map->dm_segs[0].ds_addr;
	sc->sc_lm->lm_slot[0].entry &= LDC_MTE_RA_MASK;
	sc->sc_lm->lm_slot[0].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
	sc->sc_lm->lm_slot[0].entry |= LDC_MTE_R | LDC_MTE_W;
	sc->sc_lm->lm_next = 1;
	sc->sc_lm->lm_count = 1;

	err = hv_ldc_tx_qconf(lc->lc_id,
	    lc->lc_txq->lq_map->dm_segs[0].ds_addr, lc->lc_txq->lq_nentries);
	if (err != H_EOK)
		printf("hv_ldc_tx_qconf %d\n", err);

	err = hv_ldc_rx_qconf(lc->lc_id,
	    lc->lc_rxq->lq_map->dm_segs[0].ds_addr, lc->lc_rxq->lq_nentries);
	if (err != H_EOK)
		printf("hv_ldc_rx_qconf %d\n", err);

	cbus_intr_setenabled(sc->sc_bustag, ca->ca_tx_ino, INTR_ENABLED);
	cbus_intr_setenabled(sc->sc_bustag, ca->ca_rx_ino, INTR_ENABLED);

	ldc_send_vers(lc);

	printf("\n");

	/*
	 * Interrupts aren't enabled during autoconf, so poll for VIO
	 * peer-to-peer handshake completion.
	 */
	s = splbio();
	timeout = 1000;
	do {
		if (vdsk_rx_intr(sc) && sc->sc_vio_state == VIO_ESTABLISHED)
			break;

		delay(1000);
	} while (--timeout > 0);
	splx(s);

	if (sc->sc_vio_state != VIO_ESTABLISHED)
		return;

	scsi_iopool_init(&sc->sc_iopool, sc, vdsk_io_get, vdsk_io_put);

	saa.saa_adapter = &vdsk_switch;
	saa.saa_adapter_softc = self;
	saa.saa_adapter_buswidth = 1;
	saa.saa_luns = 1;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_openings = sc->sc_vd->vd_nentries - 1;
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	config_found(self, &saa, scsiprint);

	return;

free_dring:
	vdsk_dring_free(sc->sc_dmatag, sc->sc_vd);
free_map:
	hv_ldc_set_map_table(lc->lc_id, 0, 0);
	ldc_map_free(sc->sc_dmatag, sc->sc_lm);
free_rxqueue:
	ldc_queue_free(sc->sc_dmatag, lc->lc_rxq);
free_txqueue:
	ldc_queue_free(sc->sc_dmatag, lc->lc_txq);
}

int
vdsk_tx_intr(void *arg)
{
	struct vdsk_softc *sc = arg;
	struct ldc_conn *lc = &sc->sc_lc;
	uint64_t tx_head, tx_tail, tx_state;

	hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
	if (tx_state != lc->lc_tx_state) {
		switch (tx_state) {
		case LDC_CHANNEL_DOWN:
			DPRINTF(("%s: Tx link down\n", __func__));
			break;
		case LDC_CHANNEL_UP:
			DPRINTF(("%s: Tx link up\n", __func__));
			break;
		case LDC_CHANNEL_RESET:
			DPRINTF(("%s: Tx link reset\n", __func__));
			break;
		}
		lc->lc_tx_state = tx_state;
	}

	return (1);
}

int
vdsk_rx_intr(void *arg)
{
	struct vdsk_softc *sc = arg;
	struct ldc_conn *lc = &sc->sc_lc;
	uint64_t rx_head, rx_tail, rx_state;
	struct ldc_pkt *lp;
	int err;

	err = hv_ldc_rx_get_state(lc->lc_id, &rx_head, &rx_tail, &rx_state);
	if (err == H_EINVAL)
		return (0);
	if (err != H_EOK) {
		printf("hv_ldc_rx_get_state %d\n", err);
		return (0);
	}

	if (rx_state != lc->lc_rx_state) {
		sc->sc_vio_state = 0;
		lc->lc_tx_seqid = 0;
		lc->lc_state = 0;
		switch (rx_state) {
		case LDC_CHANNEL_DOWN:
			DPRINTF(("%s: Rx link down\n", __func__));
			break;
		case LDC_CHANNEL_UP:
			DPRINTF(("%s: Rx link up\n", __func__));
			ldc_send_vers(lc);
			break;
		case LDC_CHANNEL_RESET:
			DPRINTF(("%s: Rx link reset\n", __func__));
			break;
		}
		lc->lc_rx_state = rx_state;
		hv_ldc_rx_set_qhead(lc->lc_id, rx_tail);
		return (1);
	}

	if (rx_head == rx_tail)
		return (0);

	lp = (struct ldc_pkt *)(lc->lc_rxq->lq_va + rx_head);
	switch (lp->type) {
	case LDC_CTRL:
		ldc_rx_ctrl(lc, lp);
		break;

	case LDC_DATA:
		ldc_rx_data(lc, lp);
		break;

	default:
		DPRINTF(("0x%02x/0x%02x/0x%02x\n", lp->type, lp->stype,
		    lp->ctrl));
		ldc_reset(lc);
		break;
	}

	if (lc->lc_state == 0)
		return (1);

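	/*
	 * Advance the queue head past the packet we just handled; the
	 * queue size is a power of two, so masking wraps the offset.
	 */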
	rx_head += sizeof(*lp);
	rx_head &= ((lc->lc_rxq->lq_nentries * sizeof(*lp)) - 1);
	err = hv_ldc_rx_set_qhead(lc->lc_id, rx_head);
	if (err != H_EOK)
		printf("%s: hv_ldc_rx_set_qhead %d\n", __func__, err);

	return (1);
}

void
vdsk_rx_data(struct ldc_conn *lc, struct ldc_pkt *lp)
{
	struct vio_msg *vm = (struct vio_msg *)lp;

	switch (vm->type) {
	case VIO_TYPE_CTRL:
		if ((lp->env & LDC_FRAG_START) == 0 &&
		    (lp->env & LDC_FRAG_STOP) == 0)
			return;
		vdsk_rx_vio_ctrl(lc->lc_sc, vm);
		break;

	case VIO_TYPE_DATA:
		if ((lp->env & LDC_FRAG_START) == 0)
			return;
		vdsk_rx_vio_data(lc->lc_sc, vm);
		break;

	default:
		DPRINTF(("Unhandled packet type 0x%02x\n", vm->type));
		ldc_reset(lc);
		break;
	}
}

void
vdsk_rx_vio_ctrl(struct vdsk_softc *sc, struct vio_msg *vm)
{
	struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;

	switch (tag->stype_env) {
	case VIO_VER_INFO:
		vdsk_rx_vio_ver_info(sc, tag);
		break;
	case VIO_ATTR_INFO:
		vdsk_rx_vio_attr_info(sc, tag);
		break;
	case VIO_DRING_REG:
		vdsk_rx_vio_dring_reg(sc, tag);
		break;
	case VIO_RDX:
		vdsk_rx_vio_rdx(sc, tag);
		break;
	default:
		DPRINTF(("CTRL/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
		break;
	}
}

void
vdsk_rx_vio_ver_info(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_ver_info *vi = (struct vio_ver_info *)tag;

	switch (vi->tag.stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/VER_INFO\n"));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/VER_INFO\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_VER_INFO)) {
			ldc_reset(&sc->sc_lc);
			break;
		}
		sc->sc_major = vi->major;
		sc->sc_minor = vi->minor;
		sc->sc_vio_state |= VIO_ACK_VER_INFO;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/VER_INFO\n", vi->tag.stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_VER_INFO))
		vdsk_send_attr_info(sc);
}

void
vdsk_rx_vio_attr_info(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vd_attr_info *ai = (struct vd_attr_info *)tag;

	switch (ai->tag.stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/ATTR_INFO\n"));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/ATTR_INFO\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_ATTR_INFO)) {
			ldc_reset(&sc->sc_lc);
			break;
		}

		sc->sc_vdisk_block_size = ai->vdisk_block_size;
		sc->sc_vdisk_size = ai->vdisk_size;
		if (sc->sc_major > 1 || sc->sc_minor >= 1)
			sc->sc_vd_mtype = ai->vd_mtype;
		else
			sc->sc_vd_mtype = VD_MEDIA_TYPE_FIXED;

		sc->sc_vio_state |= VIO_ACK_ATTR_INFO;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/ATTR_INFO\n", ai->tag.stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_ATTR_INFO))
		vdsk_send_dring_reg(sc);
}

void
vdsk_rx_vio_dring_reg(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_dring_reg *dr = (struct vio_dring_reg *)tag;

	switch (dr->tag.stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/DRING_REG\n"));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/DRING_REG\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_DRING_REG)) {
			ldc_reset(&sc->sc_lc);
			break;
		}

		sc->sc_dring_ident = dr->dring_ident;
		sc->sc_seq_no = 1;

		sc->sc_vio_state |= VIO_ACK_DRING_REG;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/DRING_REG\n", dr->tag.stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_DRING_REG))
		vdsk_send_rdx(sc);
}

void
vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	switch (tag->stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/RDX\n"));
		break;

	case VIO_SUBTYPE_ACK:
	{
		int prod;

		DPRINTF(("CTRL/ACK/RDX\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_RDX)) {
			ldc_reset(&sc->sc_lc);
			break;
		}
		sc->sc_vio_state |= VIO_ACK_RDX;

		/*
		 * If this ACK is the result of a reconnect, we may
		 * have pending I/O that we need to resubmit.  We need
		 * to rebuild the ring descriptors though since the
		 * vDisk server on the other side may have touched
		 * them already.  So we just clean up the ring and the
		 * LDC map and resubmit the SCSI commands based on our
		 * soft descriptors.
		 */
		prod = sc->sc_tx_prod;
		sc->sc_tx_prod = sc->sc_tx_cons;
		sc->sc_tx_cnt = 0;
		sc->sc_lm->lm_next = 1;
		sc->sc_lm->lm_count = 1;
		while (sc->sc_tx_prod != prod)
			vdsk_submit_cmd(sc->sc_vsd[sc->sc_tx_prod].vsd_xs);

		scsi_iopool_run(&sc->sc_iopool);
		break;
	}

	default:
		DPRINTF(("CTRL/0x%02x/RDX (VIO)\n", tag->stype));
		break;
	}
}

void
vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *vm)
{
	struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;

	if (sc->sc_vio_state != VIO_ESTABLISHED) {
		DPRINTF(("Spurious DATA/0x%02x/0x%04x\n", tag->stype,
		    tag->stype_env));
		return;
	}

	switch (tag->stype_env) {
	case VIO_DRING_DATA:
		vdsk_rx_vio_dring_data(sc, tag);
		break;

	default:
		DPRINTF(("DATA/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
		break;
	}
}

void
vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	switch (tag->stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("DATA/INFO/DRING_DATA\n"));
		break;

	case VIO_SUBTYPE_ACK:
	{
		struct scsi_xfer *xs;
		int cons;

		cons = sc->sc_tx_cons;
		while (sc->sc_vd->vd_desc[cons].hdr.dstate == VIO_DESC_DONE) {
			xs = sc->sc_vsd[cons].vsd_xs;
			if (ISSET(xs->flags, SCSI_POLL) == 0)
				vdsk_complete_cmd(xs, cons);
			cons++;
			cons &= (sc->sc_vd->vd_nentries - 1);
		}
		sc->sc_tx_cons = cons;
		break;
	}

	case VIO_SUBTYPE_NACK:
		DPRINTF(("DATA/NACK/DRING_DATA\n"));
		break;

	default:
		DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
		break;
	}
}

void
vdsk_ldc_reset(struct ldc_conn *lc)
{
	struct vdsk_softc *sc = lc->lc_sc;

	sc->sc_vio_state = 0;
}

void
vdsk_ldc_start(struct ldc_conn *lc)
{
	struct vdsk_softc *sc = lc->lc_sc;

	vdsk_send_ver_info(sc, VDSK_MAJOR, VDSK_MINOR);
}

void
vdsk_sendmsg(struct vdsk_softc *sc, void *msg, size_t len)
{
	struct ldc_conn *lc = &sc->sc_lc;
	int err;

	err = ldc_send_unreliable(lc, msg, len);
	if (err)
		printf("%s: ldc_send_unreliable: %d\n", __func__, err);
}

void
vdsk_send_ver_info(struct vdsk_softc *sc, uint16_t major, uint16_t minor)
{
	struct vio_ver_info vi;

	/* Allocate new session ID. */
	sc->sc_local_sid = tick();

	bzero(&vi, sizeof(vi));
	vi.tag.type = VIO_TYPE_CTRL;
	vi.tag.stype = VIO_SUBTYPE_INFO;
	vi.tag.stype_env = VIO_VER_INFO;
	vi.tag.sid = sc->sc_local_sid;
	vi.major = major;
	vi.minor = minor;
	vi.dev_class = VDEV_DISK;
	vdsk_sendmsg(sc, &vi, sizeof(vi));

	sc->sc_vio_state |= VIO_SND_VER_INFO;
}

void
vdsk_send_attr_info(struct vdsk_softc *sc)
{
	struct vd_attr_info ai;

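	/*
	 * Propose DEV_BSIZE-sized blocks and transfers of up to MAXPHYS;
	 * the server's ACK carries the actual block size and disk size,
	 * which vdsk_rx_vio_attr_info() records.
	 */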
	bzero(&ai, sizeof(ai));
	ai.tag.type = VIO_TYPE_CTRL;
	ai.tag.stype = VIO_SUBTYPE_INFO;
	ai.tag.stype_env = VIO_ATTR_INFO;
	ai.tag.sid = sc->sc_local_sid;
	ai.xfer_mode = VIO_DRING_MODE;
	ai.vdisk_block_size = DEV_BSIZE;
	ai.max_xfer_sz = MAXPHYS / DEV_BSIZE;
	vdsk_sendmsg(sc, &ai, sizeof(ai));

	sc->sc_vio_state |= VIO_SND_ATTR_INFO;
}

void
vdsk_send_dring_reg(struct vdsk_softc *sc)
{
	struct vio_dring_reg dr;

	bzero(&dr, sizeof(dr));
	dr.tag.type = VIO_TYPE_CTRL;
	dr.tag.stype = VIO_SUBTYPE_INFO;
	dr.tag.stype_env = VIO_DRING_REG;
	dr.tag.sid = sc->sc_local_sid;
	dr.dring_ident = 0;
	dr.num_descriptors = sc->sc_vd->vd_nentries;
	dr.descriptor_size = sizeof(struct vd_desc);
	dr.options = VIO_TX_RING | VIO_RX_RING;
	dr.ncookies = 1;
	dr.cookie[0].addr = 0;
	dr.cookie[0].size = PAGE_SIZE;
	vdsk_sendmsg(sc, &dr, sizeof(dr));

	sc->sc_vio_state |= VIO_SND_DRING_REG;
}

void
vdsk_send_rdx(struct vdsk_softc *sc)
{
	struct vio_rdx rdx;

	bzero(&rdx, sizeof(rdx));
	rdx.tag.type = VIO_TYPE_CTRL;
	rdx.tag.stype = VIO_SUBTYPE_INFO;
	rdx.tag.stype_env = VIO_RDX;
	rdx.tag.sid = sc->sc_local_sid;
	vdsk_sendmsg(sc, &rdx, sizeof(rdx));

	sc->sc_vio_state |= VIO_SND_RDX;
}

struct vdsk_dring *
vdsk_dring_alloc(bus_dma_tag_t t, int nentries)
{
	struct vdsk_dring *vd;
	bus_size_t size;
	caddr_t va;
	int nsegs;
	int i;

	vd = malloc(sizeof(struct vdsk_dring), M_DEVBUF, M_NOWAIT);
	if (vd == NULL)
		return NULL;

	size = roundup(nentries * sizeof(struct vd_desc), PAGE_SIZE);

	if (bus_dmamap_create(t, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &vd->vd_map) != 0)
		goto error;

	if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &vd->vd_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(t, &vd->vd_seg, 1, size, &va,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(t, vd->vd_map, va, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	vd->vd_desc = (struct vd_desc *)va;
	vd->vd_nentries = nentries;
	bzero(vd->vd_desc, nentries * sizeof(struct vd_desc));
	for (i = 0; i < vd->vd_nentries; i++)
		vd->vd_desc[i].hdr.dstate = VIO_DESC_FREE;
	return (vd);

unmap:
	bus_dmamem_unmap(t, va, size);
free:
	bus_dmamem_free(t, &vd->vd_seg, 1);
destroy:
	bus_dmamap_destroy(t, vd->vd_map);
error:
	free(vd, M_DEVBUF, sizeof(struct vdsk_dring));

	return (NULL);
}

void
vdsk_dring_free(bus_dma_tag_t t, struct vdsk_dring *vd)
{
	bus_size_t size;

	size = vd->vd_nentries * sizeof(struct vd_desc);
	size = roundup(size, PAGE_SIZE);

	bus_dmamap_unload(t, vd->vd_map);
	bus_dmamem_unmap(t, (caddr_t)vd->vd_desc, size);
	bus_dmamem_free(t, &vd->vd_seg, 1);
	bus_dmamap_destroy(t, vd->vd_map);
	free(vd, M_DEVBUF, 0);
}

void *
vdsk_io_get(void *xsc)
{
	struct vdsk_softc *sc = xsc;
	void *rv = sc; /* just has to be !NULL */
	int s;

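	/*
	 * Hand out an I/O reservation only while the VIO session is
	 * established and the transmit ring has room; when we return
	 * NULL, the midlayer queues the request and re-runs the pool
	 * as reservations are returned via vdsk_io_put().
	 */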
	s = splbio();
	if (sc->sc_vio_state != VIO_ESTABLISHED ||
	    sc->sc_tx_cnt >= sc->sc_vd->vd_nentries)
		rv = NULL;
	else
		sc->sc_tx_cnt++;
	splx(s);

	return (rv);
}

void
vdsk_io_put(void *xsc, void *io)
{
	struct vdsk_softc *sc = xsc;
	int s;

#ifdef DIAGNOSTIC
	if (sc != io)
		panic("vdsk_io_put: unexpected io");
#endif

	s = splbio();
	sc->sc_tx_cnt--;
	splx(s);
}

void
vdsk_scsi_cmd(struct scsi_xfer *xs)
{
	struct vdsk_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	int timeout, s;
	int desc;

	switch (xs->cmd.opcode) {
	case READ_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case SYNCHRONIZE_CACHE:
		break;

	case INQUIRY:
		vdsk_scsi_inq(xs);
		return;
	case READ_CAPACITY:
		vdsk_scsi_capacity(xs);
		return;
	case READ_CAPACITY_16:
		vdsk_scsi_capacity16(xs);
		return;

	case TEST_UNIT_READY:
	case START_STOP:
	case PREVENT_ALLOW:
		vdsk_scsi_done(xs, XS_NOERROR);
		return;

	default:
		printf("%s cmd 0x%02x\n", __func__, xs->cmd.opcode);
	case MODE_SENSE:
	case MODE_SENSE_BIG:
	case REPORT_LUNS:
	case READ_TOC:
		vdsk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	s = splbio();
	desc = vdsk_submit_cmd(xs);

	if (!ISSET(xs->flags, SCSI_POLL)) {
		splx(s);
		return;
	}

	timeout = 1000;
	do {
		if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE)
			break;

		delay(1000);
	} while (--timeout > 0);
	if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE) {
		vdsk_complete_cmd(xs, desc);
	} else {
		ldc_reset(&sc->sc_lc);
		vdsk_scsi_done(xs, XS_TIMEOUT);
	}
	splx(s);
}

int
vdsk_submit_cmd(struct scsi_xfer *xs)
{
	struct vdsk_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct ldc_map *map = sc->sc_lm;
	struct vio_dring_msg dm;
	struct scsi_rw *rw;
	struct scsi_rw_10 *rw10;
	struct scsi_rw_12 *rw12;
	struct scsi_rw_16 *rw16;
	u_int64_t lba;
	u_int32_t sector_count;
	uint8_t operation;
	vaddr_t va;
	paddr_t pa;
	psize_t nbytes;
	int len, ncookies;
	int desc;

	switch (xs->cmd.opcode) {
	case READ_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
		operation = VD_OP_BREAD;
		break;

	case WRITE_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		operation = VD_OP_BWRITE;
		break;

	case SYNCHRONIZE_CACHE:
		operation = VD_OP_FLUSH;
		break;
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands.  SYNCHRONIZE CACHE has the
	 * same layout as the 10-byte READ/WRITE commands.
	 */
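	/*
	 * The 6-byte CDB packs a 21-bit LBA into its address field and
	 * uses a transfer length of 0 to mean 256 blocks; the 10/12/16
	 * byte variants carry the LBA and length in full-width fields.
	 */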
	if (xs->cmdlen == 6) {
		rw = (struct scsi_rw *)&xs->cmd;
		lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		sector_count = rw->length ? rw->length : 0x100;
	} else if (xs->cmdlen == 10) {
		rw10 = (struct scsi_rw_10 *)&xs->cmd;
		lba = _4btol(rw10->addr);
		sector_count = _2btol(rw10->length);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsi_rw_12 *)&xs->cmd;
		lba = _4btol(rw12->addr);
		sector_count = _4btol(rw12->length);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsi_rw_16 *)&xs->cmd;
		lba = _8btol(rw16->addr);
		sector_count = _4btol(rw16->length);
	}

	desc = sc->sc_tx_prod;

	ncookies = 0;
	len = xs->datalen;
	va = (vaddr_t)xs->data;
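	/*
	 * Walk the data buffer a page at a time: export each physical
	 * page through a free LDC map table slot and describe it to
	 * the server as a cookie covering at most one page.
	 */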
	while (len > 0) {
		KASSERT(ncookies < MAXPHYS / PAGE_SIZE);
		pmap_extract(pmap_kernel(), va, &pa);
		while (map->lm_slot[map->lm_next].entry != 0) {
			map->lm_next++;
			map->lm_next &= (map->lm_nentries - 1);
		}
		map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
		map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
		map->lm_slot[map->lm_next].entry |= LDC_MTE_IOR | LDC_MTE_IOW;
		map->lm_slot[map->lm_next].entry |= LDC_MTE_R | LDC_MTE_W;
		map->lm_count++;

		nbytes = MIN(len, PAGE_SIZE - (pa & PAGE_MASK));

		sc->sc_vd->vd_desc[desc].cookie[ncookies].addr =
		    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
		sc->sc_vd->vd_desc[desc].cookie[ncookies].size = nbytes;

		sc->sc_vsd[desc].vsd_map_idx[ncookies] = map->lm_next;
		va += nbytes;
		len -= nbytes;
		ncookies++;
	}

	if (ISSET(xs->flags, SCSI_POLL) == 0)
		sc->sc_vd->vd_desc[desc].hdr.ack = 1;
	else
		sc->sc_vd->vd_desc[desc].hdr.ack = 0;
	sc->sc_vd->vd_desc[desc].operation = operation;
	sc->sc_vd->vd_desc[desc].slice = VD_SLICE_NONE;
	sc->sc_vd->vd_desc[desc].status = 0xffffffff;
	sc->sc_vd->vd_desc[desc].offset = lba;
	sc->sc_vd->vd_desc[desc].size = xs->datalen;
	sc->sc_vd->vd_desc[desc].ncookies = ncookies;
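	/*
	 * Order the descriptor stores: the barrier makes sure all the
	 * fields above are visible before the state change exposes the
	 * descriptor to the vDisk server.
	 */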
	membar_sync();
	sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_READY;

	sc->sc_vsd[desc].vsd_xs = xs;
	sc->sc_vsd[desc].vsd_ncookies = ncookies;

	sc->sc_tx_prod++;
	sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1);

	bzero(&dm, sizeof(dm));
	dm.tag.type = VIO_TYPE_DATA;
	dm.tag.stype = VIO_SUBTYPE_INFO;
	dm.tag.stype_env = VIO_DRING_DATA;
	dm.tag.sid = sc->sc_local_sid;
	dm.seq_no = sc->sc_seq_no++;
	dm.dring_ident = sc->sc_dring_ident;
	dm.start_idx = dm.end_idx = desc;
	vdsk_sendmsg(sc, &dm, sizeof(dm));

	return desc;
}

void
vdsk_complete_cmd(struct scsi_xfer *xs, int desc)
{
	struct vdsk_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct ldc_map *map = sc->sc_lm;
	int cookie, idx;
	int error;

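	/* Release the LDC map table entries that exported this transfer. */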
	cookie = 0;
	while (cookie < sc->sc_vsd[desc].vsd_ncookies) {
		idx = sc->sc_vsd[desc].vsd_map_idx[cookie++];
		map->lm_slot[idx].entry = 0;
		map->lm_count--;
	}

	error = XS_NOERROR;
	if (sc->sc_vd->vd_desc[desc].status != 0)
		error = XS_DRIVER_STUFFUP;
	xs->resid = xs->datalen - sc->sc_vd->vd_desc[desc].size;
	vdsk_scsi_done(xs, error);

	sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_FREE;
}

void
vdsk_scsi_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry *inq = (struct scsi_inquiry *)&xs->cmd;

	if (ISSET(inq->flags, SI_EVPD))
		vdsk_scsi_done(xs, XS_DRIVER_STUFFUP);
	else
		vdsk_scsi_inquiry(xs);
}

void
vdsk_scsi_inquiry(struct scsi_xfer *xs)
{
	struct vdsk_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct scsi_inquiry_data inq;
	char buf[5];

	bzero(&inq, sizeof(inq));

	switch (sc->sc_vd_mtype) {
	case VD_MEDIA_TYPE_CD:
	case VD_MEDIA_TYPE_DVD:
		inq.device = T_CDROM;
		break;

	case VD_MEDIA_TYPE_FIXED:
	default:
		inq.device = T_DIRECT;
		break;
	}

	inq.version = SCSI_REV_SPC3;
	inq.response_format = SID_SCSI2_RESPONSE;
	inq.additional_length = SID_SCSI2_ALEN;
	inq.flags |= SID_CmdQue;
	bcopy("SUN     ", inq.vendor, sizeof(inq.vendor));
	bcopy("Virtual Disk    ", inq.product, sizeof(inq.product));
	snprintf(buf, sizeof(buf), "%u.%u ", sc->sc_major, sc->sc_minor);
	bcopy(buf, inq.revision, sizeof(inq.revision));

	scsi_copy_internal_data(xs, &inq, sizeof(inq));

	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_capacity(struct scsi_xfer *xs)
{
	struct vdsk_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct scsi_read_cap_data rcd;
	uint64_t capacity;

	bzero(&rcd, sizeof(rcd));

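	/*
	 * READ CAPACITY(10) reports the last LBA; saturate it at
	 * 0xffffffff so the initiator knows to issue READ CAPACITY(16)
	 * for disks of 2 TB and larger.
	 */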
	capacity = sc->sc_vdisk_size - 1;
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(sc->sc_vdisk_block_size, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_capacity16(struct scsi_xfer *xs)
{
	struct vdsk_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct scsi_read_cap_data_16 rcd;

	bzero(&rcd, sizeof(rcd));

	_lto8b(sc->sc_vdisk_size - 1, rcd.addr);
	_lto4b(sc->sc_vdisk_block_size, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_done(struct scsi_xfer *xs, int error)
{
	xs->error = error;

	scsi_done(xs);
}