/*	$OpenBSD: vdsk.c,v 1.47 2016/10/13 18:16:42 tom Exp $	*/
/*
 * Copyright (c) 2009, 2011 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <machine/autoconf.h>
#include <machine/hypervisor.h>

#include <uvm/uvm_extern.h>

#include <scsi/scsi_all.h>
#include <scsi/cd.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

#include <sparc64/dev/cbusvar.h>
#include <sparc64/dev/ldcvar.h>
#include <sparc64/dev/viovar.h>

#ifdef VDSK_DEBUG
#define DPRINTF(x)	printf x
#else
#define DPRINTF(x)
#endif

#define VDSK_TX_ENTRIES		32
#define VDSK_RX_ENTRIES		32

struct vd_attr_info {
	struct vio_msg_tag	tag;
	uint8_t			xfer_mode;
	uint8_t			vd_type;
	uint8_t			vd_mtype;
	uint8_t			_reserved1;
	uint32_t		vdisk_block_size;
	uint64_t		operations;
	uint64_t		vdisk_size;
	uint64_t		max_xfer_sz;
	uint64_t		_reserved2[2];
};

#define VD_DISK_TYPE_SLICE	0x01
#define VD_DISK_TYPE_DISK	0x02

#define VD_MEDIA_TYPE_FIXED	0x01
#define VD_MEDIA_TYPE_CD	0x02
#define VD_MEDIA_TYPE_DVD	0x03

/* vDisk version 1.0. */
#define VD_OP_BREAD		0x01
#define VD_OP_BWRITE		0x02
#define VD_OP_FLUSH		0x03
#define VD_OP_GET_WCE		0x04
#define VD_OP_SET_WCE		0x05
#define VD_OP_GET_VTOC		0x06
#define VD_OP_SET_VTOC		0x07
#define VD_OP_GET_DISKGEOM	0x08
#define VD_OP_SET_DISKGEOM	0x09
#define VD_OP_GET_DEVID		0x0b
#define VD_OP_GET_EFI		0x0c
#define VD_OP_SET_EFI		0x0d

/* vDisk version 1.1. */
#define VD_OP_SCSICMD		0x0a
#define VD_OP_RESET		0x0e
#define VD_OP_GET_ACCESS	0x0f
#define VD_OP_SET_ACCESS	0x10
#define VD_OP_GET_CAPACITY	0x11

struct vd_desc {
	struct vio_dring_hdr	hdr;
	uint64_t		req_id;
	uint8_t			operation;
	uint8_t			slice;
	uint16_t		_reserved1;
	uint32_t		status;
	uint64_t		offset;
	uint64_t		size;
	uint32_t		ncookies;
	uint32_t		_reserved2;
	struct ldc_cookie	cookie[MAXPHYS / PAGE_SIZE];
};

#define VD_SLICE_NONE		0xff

struct vdsk_dring {
	bus_dmamap_t		vd_map;
	bus_dma_segment_t	vd_seg;
	struct vd_desc		*vd_desc;
	int			vd_nentries;
};

struct vdsk_dring *vdsk_dring_alloc(bus_dma_tag_t, int);
void	vdsk_dring_free(bus_dma_tag_t, struct vdsk_dring *);

/*
 * We support vDisk 1.0 and 1.1.
 */
#define VDSK_MAJOR	1
#define VDSK_MINOR	1
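/*
 * The version actually negotiated with the vDisk server ends up in
 * sc_major/sc_minor; attributes such as the media type are only
 * meaningful once version 1.1 or later has been agreed on.
 */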

struct vdsk_soft_desc {
	int		vsd_map_idx[MAXPHYS / PAGE_SIZE];
	struct scsi_xfer *vsd_xs;
	int		vsd_ncookies;
};

struct vdsk_softc {
	struct device	sc_dv;
	bus_space_tag_t	sc_bustag;
	bus_dma_tag_t	sc_dmatag;

	void		*sc_tx_ih;
	void		*sc_rx_ih;

	struct ldc_conn	sc_lc;

	uint16_t	sc_vio_state;
#define VIO_SND_VER_INFO	0x0001
#define VIO_ACK_VER_INFO	0x0002
#define VIO_SND_ATTR_INFO	0x0004
#define VIO_ACK_ATTR_INFO	0x0008
#define VIO_SND_DRING_REG	0x0010
#define VIO_ACK_DRING_REG	0x0020
#define VIO_SND_RDX		0x0040
#define VIO_ACK_RDX		0x0080
#define VIO_ESTABLISHED		0x00ff
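	/*
	 * The handshake is complete once every SND bit has been
	 * answered by the corresponding ACK bit, i.e. when
	 * sc_vio_state equals VIO_ESTABLISHED.
	 */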

	uint16_t	sc_major;
	uint16_t	sc_minor;

	uint32_t	sc_local_sid;
	uint64_t	sc_dring_ident;
	uint64_t	sc_seq_no;

	int		sc_tx_cnt;
	int		sc_tx_prod;
	int		sc_tx_cons;

	struct ldc_map	*sc_lm;
	struct vdsk_dring *sc_vd;
	struct vdsk_soft_desc *sc_vsd;

	struct scsi_iopool sc_iopool;
	struct scsi_adapter sc_switch;
	struct scsi_link sc_link;

	uint32_t	sc_vdisk_block_size;
	uint64_t	sc_vdisk_size;
	uint8_t		sc_vd_mtype;
};

int	vdsk_match(struct device *, void *, void *);
void	vdsk_attach(struct device *, struct device *, void *);

struct cfattach vdsk_ca = {
	sizeof(struct vdsk_softc), vdsk_match, vdsk_attach
};

struct cfdriver vdsk_cd = {
	NULL, "vdsk", DV_DULL
};

int	vdsk_tx_intr(void *);
int	vdsk_rx_intr(void *);

void	vdsk_rx_data(struct ldc_conn *, struct ldc_pkt *);
void	vdsk_rx_vio_ctrl(struct vdsk_softc *, struct vio_msg *);
void	vdsk_rx_vio_ver_info(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_attr_info(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_dring_reg(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_rdx(struct vdsk_softc *, struct vio_msg_tag *);
void	vdsk_rx_vio_data(struct vdsk_softc *, struct vio_msg *);
void	vdsk_rx_vio_dring_data(struct vdsk_softc *, struct vio_msg_tag *);

void	vdsk_ldc_reset(struct ldc_conn *);
void	vdsk_ldc_start(struct ldc_conn *);

void	vdsk_sendmsg(struct vdsk_softc *, void *, size_t);
void	vdsk_send_ver_info(struct vdsk_softc *, uint16_t, uint16_t);
void	vdsk_send_attr_info(struct vdsk_softc *);
void	vdsk_send_dring_reg(struct vdsk_softc *);
void	vdsk_send_rdx(struct vdsk_softc *);

void	*vdsk_io_get(void *);
void	vdsk_io_put(void *, void *);

void	vdsk_scsi_cmd(struct scsi_xfer *);
int	vdsk_submit_cmd(struct scsi_xfer *);
void	vdsk_complete_cmd(struct scsi_xfer *, int);
int	vdsk_dev_probe(struct scsi_link *);
void	vdsk_dev_free(struct scsi_link *);

void	vdsk_scsi_inq(struct scsi_xfer *);
void	vdsk_scsi_inquiry(struct scsi_xfer *);
void	vdsk_scsi_capacity(struct scsi_xfer *);
void	vdsk_scsi_capacity16(struct scsi_xfer *);
void	vdsk_scsi_done(struct scsi_xfer *, int);

int
vdsk_match(struct device *parent, void *match, void *aux)
{
	struct cbus_attach_args *ca = aux;

	if (strcmp(ca->ca_name, "disk") == 0)
		return (1);

	return (0);
}

void
vdsk_attach(struct device *parent, struct device *self, void *aux)
{
	struct vdsk_softc *sc = (struct vdsk_softc *)self;
	struct cbus_attach_args *ca = aux;
	struct scsibus_attach_args saa;
	struct ldc_conn *lc;
	int err, s;
	int timeout;

	sc->sc_bustag = ca->ca_bustag;
	sc->sc_dmatag = ca->ca_dmatag;

	printf(": ivec 0x%llx, 0x%llx", ca->ca_tx_ino, ca->ca_rx_ino);

	/*
	 * Un-configure queues before registering interrupt handlers,
	 * such that we don't get any stale LDC packets or events.
	 */
	hv_ldc_tx_qconf(ca->ca_id, 0, 0);
	hv_ldc_rx_qconf(ca->ca_id, 0, 0);

	sc->sc_tx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_tx_ino,
	    IPL_BIO, 0, vdsk_tx_intr, sc, sc->sc_dv.dv_xname);
	sc->sc_rx_ih = bus_intr_establish(ca->ca_bustag, ca->ca_rx_ino,
	    IPL_BIO, 0, vdsk_rx_intr, sc, sc->sc_dv.dv_xname);
	if (sc->sc_tx_ih == NULL || sc->sc_rx_ih == NULL) {
		printf(", can't establish interrupt\n");
		return;
	}

	lc = &sc->sc_lc;
	lc->lc_id = ca->ca_id;
	lc->lc_sc = sc;
	lc->lc_reset = vdsk_ldc_reset;
	lc->lc_start = vdsk_ldc_start;
	lc->lc_rx_data = vdsk_rx_data;

	lc->lc_txq = ldc_queue_alloc(sc->sc_dmatag, VDSK_TX_ENTRIES);
	if (lc->lc_txq == NULL) {
		printf(", can't allocate tx queue\n");
		return;
	}

	lc->lc_rxq = ldc_queue_alloc(sc->sc_dmatag, VDSK_RX_ENTRIES);
	if (lc->lc_rxq == NULL) {
		printf(", can't allocate rx queue\n");
		goto free_txqueue;
	}

	sc->sc_lm = ldc_map_alloc(sc->sc_dmatag, 2048);
	if (sc->sc_lm == NULL) {
		printf(", can't allocate LDC mapping table\n");
		goto free_rxqueue;
	}

	err = hv_ldc_set_map_table(lc->lc_id,
	    sc->sc_lm->lm_map->dm_segs[0].ds_addr, sc->sc_lm->lm_nentries);
	if (err != H_EOK) {
		printf("hv_ldc_set_map_table %d\n", err);
		goto free_map;
	}

	sc->sc_vd = vdsk_dring_alloc(sc->sc_dmatag, 32);
	if (sc->sc_vd == NULL) {
		printf(", can't allocate dring\n");
		goto free_map;
	}
	sc->sc_vsd = malloc(32 * sizeof(*sc->sc_vsd), M_DEVBUF, M_NOWAIT);
	if (sc->sc_vsd == NULL) {
		printf(", can't allocate software ring\n");
		goto free_dring;
	}

	sc->sc_lm->lm_slot[0].entry = sc->sc_vd->vd_map->dm_segs[0].ds_addr;
	sc->sc_lm->lm_slot[0].entry &= LDC_MTE_RA_MASK;
	sc->sc_lm->lm_slot[0].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
	sc->sc_lm->lm_slot[0].entry |= LDC_MTE_R | LDC_MTE_W;
	sc->sc_lm->lm_next = 1;
	sc->sc_lm->lm_count = 1;

	err = hv_ldc_tx_qconf(lc->lc_id,
	    lc->lc_txq->lq_map->dm_segs[0].ds_addr, lc->lc_txq->lq_nentries);
	if (err != H_EOK)
		printf("hv_ldc_tx_qconf %d\n", err);

	err = hv_ldc_rx_qconf(lc->lc_id,
	    lc->lc_rxq->lq_map->dm_segs[0].ds_addr, lc->lc_rxq->lq_nentries);
	if (err != H_EOK)
		printf("hv_ldc_rx_qconf %d\n", err);

	cbus_intr_setenabled(sc->sc_bustag, ca->ca_tx_ino, INTR_ENABLED);
	cbus_intr_setenabled(sc->sc_bustag, ca->ca_rx_ino, INTR_ENABLED);

	ldc_send_vers(lc);

	printf("\n");

	/*
	 * Interrupts aren't enabled during autoconf, so poll for VIO
	 * peer-to-peer handshake completion.
	 */
	s = splbio();
	timeout = 1000;
	do {
		if (vdsk_rx_intr(sc) && sc->sc_vio_state == VIO_ESTABLISHED)
			break;

		delay(1000);
	} while (--timeout > 0);
	splx(s);

	if (sc->sc_vio_state != VIO_ESTABLISHED)
		return;

	scsi_iopool_init(&sc->sc_iopool, sc, vdsk_io_get, vdsk_io_put);

	sc->sc_switch.scsi_cmd = vdsk_scsi_cmd;
	sc->sc_switch.scsi_minphys = scsi_minphys;
	sc->sc_switch.dev_probe = vdsk_dev_probe;
	sc->sc_switch.dev_free = vdsk_dev_free;

	sc->sc_link.adapter = &sc->sc_switch;
	sc->sc_link.adapter_softc = self;
	sc->sc_link.adapter_buswidth = 2;
	sc->sc_link.luns = 1; /* XXX slices should be presented as luns? */
	sc->sc_link.adapter_target = 2;
	sc->sc_link.openings = sc->sc_vd->vd_nentries - 1;
	sc->sc_link.pool = &sc->sc_iopool;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;
	config_found(self, &saa, scsiprint);

	return;

free_dring:
	vdsk_dring_free(sc->sc_dmatag, sc->sc_vd);
free_map:
	hv_ldc_set_map_table(lc->lc_id, 0, 0);
	ldc_map_free(sc->sc_dmatag, sc->sc_lm);
free_rxqueue:
	ldc_queue_free(sc->sc_dmatag, lc->lc_rxq);
free_txqueue:
	ldc_queue_free(sc->sc_dmatag, lc->lc_txq);
}

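/*
 * Tx interrupt: we don't track individual transmissions, so all we
 * do here is record LDC channel state transitions (and log them
 * when VDSK_DEBUG is set).
 */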
int
vdsk_tx_intr(void *arg)
{
	struct vdsk_softc *sc = arg;
	struct ldc_conn *lc = &sc->sc_lc;
	uint64_t tx_head, tx_tail, tx_state;

	hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
	if (tx_state != lc->lc_tx_state) {
		switch (tx_state) {
		case LDC_CHANNEL_DOWN:
			DPRINTF(("Tx link down\n"));
			break;
		case LDC_CHANNEL_UP:
			DPRINTF(("Tx link up\n"));
			break;
		case LDC_CHANNEL_RESET:
			DPRINTF(("Tx link reset\n"));
			break;
		}
		lc->lc_tx_state = tx_state;
	}

	return (1);
}

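/*
 * Rx interrupt: handle one LDC packet per invocation.  A channel
 * state change restarts the VIO handshake; otherwise the packet is
 * dispatched to the control or data path and the queue head is
 * advanced (the queue size is a power of two, so the head wraps by
 * masking).
 */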
int
vdsk_rx_intr(void *arg)
{
	struct vdsk_softc *sc = arg;
	struct ldc_conn *lc = &sc->sc_lc;
	uint64_t rx_head, rx_tail, rx_state;
	struct ldc_pkt *lp;
	int err;

	err = hv_ldc_rx_get_state(lc->lc_id, &rx_head, &rx_tail, &rx_state);
	if (err == H_EINVAL)
		return (0);
	if (err != H_EOK) {
		printf("hv_ldc_rx_get_state %d\n", err);
		return (0);
	}

	if (rx_state != lc->lc_rx_state) {
		sc->sc_vio_state = 0;
		lc->lc_tx_seqid = 0;
		lc->lc_state = 0;
		switch (rx_state) {
		case LDC_CHANNEL_DOWN:
			DPRINTF(("Rx link down\n"));
			break;
		case LDC_CHANNEL_UP:
			DPRINTF(("Rx link up\n"));
			ldc_send_vers(lc);
			break;
		case LDC_CHANNEL_RESET:
			DPRINTF(("Rx link reset\n"));
			break;
		}
		lc->lc_rx_state = rx_state;
		hv_ldc_rx_set_qhead(lc->lc_id, rx_tail);
		return (1);
	}

	if (rx_head == rx_tail)
		return (0);

	lp = (struct ldc_pkt *)(lc->lc_rxq->lq_va + rx_head);
	switch (lp->type) {
	case LDC_CTRL:
		ldc_rx_ctrl(lc, lp);
		break;

	case LDC_DATA:
		ldc_rx_data(lc, lp);
		break;

	default:
		DPRINTF(("0x%02x/0x%02x/0x%02x\n", lp->type, lp->stype,
		    lp->ctrl));
		ldc_reset(lc);
		break;
	}

	if (lc->lc_state == 0)
		return (1);

	rx_head += sizeof(*lp);
	rx_head &= ((lc->lc_rxq->lq_nentries * sizeof(*lp)) - 1);
	err = hv_ldc_rx_set_qhead(lc->lc_id, rx_head);
	if (err != H_EOK)
		printf("%s: hv_ldc_rx_set_qhead %d\n", __func__, err);

	return (1);
}

void
vdsk_rx_data(struct ldc_conn *lc, struct ldc_pkt *lp)
{
	struct vio_msg *vm = (struct vio_msg *)lp;

	switch (vm->type) {
	case VIO_TYPE_CTRL:
		if ((lp->env & LDC_FRAG_START) == 0 &&
		    (lp->env & LDC_FRAG_STOP) == 0)
			return;
		vdsk_rx_vio_ctrl(lc->lc_sc, vm);
		break;

	case VIO_TYPE_DATA:
		if ((lp->env & LDC_FRAG_START) == 0)
			return;
		vdsk_rx_vio_data(lc->lc_sc, vm);
		break;

	default:
		DPRINTF(("Unhandled packet type 0x%02x\n", vm->type));
		ldc_reset(lc);
		break;
	}
}

void
vdsk_rx_vio_ctrl(struct vdsk_softc *sc, struct vio_msg *vm)
{
	struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;

	switch (tag->stype_env) {
	case VIO_VER_INFO:
		vdsk_rx_vio_ver_info(sc, tag);
		break;
	case VIO_ATTR_INFO:
		vdsk_rx_vio_attr_info(sc, tag);
		break;
	case VIO_DRING_REG:
		vdsk_rx_vio_dring_reg(sc, tag);
		break;
	case VIO_RDX:
		vdsk_rx_vio_rdx(sc, tag);
		break;
	default:
		DPRINTF(("CTRL/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
		break;
	}
}

void
vdsk_rx_vio_ver_info(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_ver_info *vi = (struct vio_ver_info *)tag;

	switch (vi->tag.stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/VER_INFO\n"));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/VER_INFO\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_VER_INFO)) {
			ldc_reset(&sc->sc_lc);
			break;
		}
		sc->sc_major = vi->major;
		sc->sc_minor = vi->minor;
		sc->sc_vio_state |= VIO_ACK_VER_INFO;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/VER_INFO\n", vi->tag.stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_VER_INFO))
		vdsk_send_attr_info(sc);
}

void
vdsk_rx_vio_attr_info(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vd_attr_info *ai = (struct vd_attr_info *)tag;

	switch (ai->tag.stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/ATTR_INFO\n"));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/ATTR_INFO\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_ATTR_INFO)) {
			ldc_reset(&sc->sc_lc);
			break;
		}

		sc->sc_vdisk_block_size = ai->vdisk_block_size;
		sc->sc_vdisk_size = ai->vdisk_size;
		if (sc->sc_major > 1 || sc->sc_minor >= 1)
			sc->sc_vd_mtype = ai->vd_mtype;
		else
			sc->sc_vd_mtype = VD_MEDIA_TYPE_FIXED;

		sc->sc_vio_state |= VIO_ACK_ATTR_INFO;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/ATTR_INFO\n", ai->tag.stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_ATTR_INFO))
		vdsk_send_dring_reg(sc);
}

void
vdsk_rx_vio_dring_reg(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_dring_reg *dr = (struct vio_dring_reg *)tag;

	switch (dr->tag.stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/DRING_REG\n"));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/DRING_REG\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_DRING_REG)) {
			ldc_reset(&sc->sc_lc);
			break;
		}

		sc->sc_dring_ident = dr->dring_ident;
		sc->sc_seq_no = 1;

		sc->sc_vio_state |= VIO_ACK_DRING_REG;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/DRING_REG\n", dr->tag.stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_ACK_DRING_REG))
		vdsk_send_rdx(sc);
}

void
vdsk_rx_vio_rdx(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	switch (tag->stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/RDX\n"));
		break;

	case VIO_SUBTYPE_ACK:
	{
		int prod;

		DPRINTF(("CTRL/ACK/RDX\n"));
		if (!ISSET(sc->sc_vio_state, VIO_SND_RDX)) {
			ldc_reset(&sc->sc_lc);
			break;
		}
		sc->sc_vio_state |= VIO_ACK_RDX;

		/*
		 * If this ACK is the result of a reconnect, we may
		 * have pending I/O that we need to resubmit.  We need
		 * to rebuild the ring descriptors though since the
		 * vDisk server on the other side may have touched
		 * them already.  So we just clean up the ring and the
		 * LDC map and resubmit the SCSI commands based on our
		 * soft descriptors.
		 */
		prod = sc->sc_tx_prod;
		sc->sc_tx_prod = sc->sc_tx_cons;
		sc->sc_tx_cnt = 0;
		sc->sc_lm->lm_next = 1;
		sc->sc_lm->lm_count = 1;
		while (sc->sc_tx_prod != prod)
			vdsk_submit_cmd(sc->sc_vsd[sc->sc_tx_prod].vsd_xs);

		scsi_iopool_run(&sc->sc_iopool);
		break;
	}

	default:
		DPRINTF(("CTRL/0x%02x/RDX (VIO)\n", tag->stype));
		break;
	}
}

void
vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *vm)
{
	struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;

	if (sc->sc_vio_state != VIO_ESTABLISHED) {
		DPRINTF(("Spurious DATA/0x%02x/0x%04x\n", tag->stype,
		    tag->stype_env));
		return;
	}

	switch (tag->stype_env) {
	case VIO_DRING_DATA:
		vdsk_rx_vio_dring_data(sc, tag);
		break;

	default:
		DPRINTF(("DATA/0x%02x/0x%04x\n", tag->stype, tag->stype_env));
		break;
	}
}

void
vdsk_rx_vio_dring_data(struct vdsk_softc *sc, struct vio_msg_tag *tag)
{
	switch (tag->stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("DATA/INFO/DRING_DATA\n"));
		break;

	case VIO_SUBTYPE_ACK:
	{
		struct scsi_xfer *xs;
		int cons;

		cons = sc->sc_tx_cons;
		while (sc->sc_vd->vd_desc[cons].hdr.dstate == VIO_DESC_DONE) {
			xs = sc->sc_vsd[cons].vsd_xs;
			if (ISSET(xs->flags, SCSI_POLL) == 0)
				vdsk_complete_cmd(xs, cons);
			cons++;
			cons &= (sc->sc_vd->vd_nentries - 1);
		}
		sc->sc_tx_cons = cons;
		break;
	}

	case VIO_SUBTYPE_NACK:
		DPRINTF(("DATA/NACK/DRING_DATA\n"));
		break;

	default:
		DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
		break;
	}
}

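/*
 * LDC callbacks: a channel reset invalidates the VIO handshake
 * state; once the channel comes back up we restart the handshake by
 * proposing our version.
 */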
void
vdsk_ldc_reset(struct ldc_conn *lc)
{
	struct vdsk_softc *sc = lc->lc_sc;

	sc->sc_vio_state = 0;
}

void
vdsk_ldc_start(struct ldc_conn *lc)
{
	struct vdsk_softc *sc = lc->lc_sc;

	vdsk_send_ver_info(sc, VDSK_MAJOR, VDSK_MINOR);
}

void
vdsk_sendmsg(struct vdsk_softc *sc, void *msg, size_t len)
{
	struct ldc_conn *lc = &sc->sc_lc;
	int err;

	err = ldc_send_unreliable(lc, msg, len);
	if (err)
		printf("%s: ldc_send_unreliable: %d\n", __func__, err);
}

void
vdsk_send_ver_info(struct vdsk_softc *sc, uint16_t major, uint16_t minor)
{
	struct vio_ver_info vi;

	/* Allocate new session ID. */
	sc->sc_local_sid = tick();

	bzero(&vi, sizeof(vi));
	vi.tag.type = VIO_TYPE_CTRL;
	vi.tag.stype = VIO_SUBTYPE_INFO;
	vi.tag.stype_env = VIO_VER_INFO;
	vi.tag.sid = sc->sc_local_sid;
	vi.major = major;
	vi.minor = minor;
	vi.dev_class = VDEV_DISK;
	vdsk_sendmsg(sc, &vi, sizeof(vi));

	sc->sc_vio_state |= VIO_SND_VER_INFO;
}

void
vdsk_send_attr_info(struct vdsk_softc *sc)
{
	struct vd_attr_info ai;

	bzero(&ai, sizeof(ai));
	ai.tag.type = VIO_TYPE_CTRL;
	ai.tag.stype = VIO_SUBTYPE_INFO;
	ai.tag.stype_env = VIO_ATTR_INFO;
	ai.tag.sid = sc->sc_local_sid;
	ai.xfer_mode = VIO_DRING_MODE;
	ai.vdisk_block_size = DEV_BSIZE;
	ai.max_xfer_sz = MAXPHYS / DEV_BSIZE;
	vdsk_sendmsg(sc, &ai, sizeof(ai));

	sc->sc_vio_state |= VIO_SND_ATTR_INFO;
}

void
vdsk_send_dring_reg(struct vdsk_softc *sc)
{
	struct vio_dring_reg dr;

	bzero(&dr, sizeof(dr));
	dr.tag.type = VIO_TYPE_CTRL;
	dr.tag.stype = VIO_SUBTYPE_INFO;
	dr.tag.stype_env = VIO_DRING_REG;
	dr.tag.sid = sc->sc_local_sid;
	dr.dring_ident = 0;
	dr.num_descriptors = sc->sc_vd->vd_nentries;
	dr.descriptor_size = sizeof(struct vd_desc);
	dr.options = VIO_TX_RING | VIO_RX_RING;
	dr.ncookies = 1;
	dr.cookie[0].addr = 0;
	dr.cookie[0].size = PAGE_SIZE;
	vdsk_sendmsg(sc, &dr, sizeof(dr));

	sc->sc_vio_state |= VIO_SND_DRING_REG;
}

void
vdsk_send_rdx(struct vdsk_softc *sc)
{
	struct vio_rdx rdx;

	bzero(&rdx, sizeof(rdx));
	rdx.tag.type = VIO_TYPE_CTRL;
	rdx.tag.stype = VIO_SUBTYPE_INFO;
	rdx.tag.stype_env = VIO_RDX;
	rdx.tag.sid = sc->sc_local_sid;
	vdsk_sendmsg(sc, &rdx, sizeof(rdx));

	sc->sc_vio_state |= VIO_SND_RDX;
}

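/*
 * Allocate a DMA-able descriptor ring of nentries vd_desc
 * structures, rounded up to a full page, with every descriptor
 * marked free.  The caller (vdsk_attach) exports the ring to the
 * vDisk server through the first LDC map table entry.
 */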
struct vdsk_dring *
vdsk_dring_alloc(bus_dma_tag_t t, int nentries)
{
	struct vdsk_dring *vd;
	bus_size_t size;
	caddr_t va;
	int nsegs;
	int i;

	vd = malloc(sizeof(struct vdsk_dring), M_DEVBUF, M_NOWAIT);
	if (vd == NULL)
		return NULL;

	size = roundup(nentries * sizeof(struct vd_desc), PAGE_SIZE);

	if (bus_dmamap_create(t, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &vd->vd_map) != 0)
		goto error;

	if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &vd->vd_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(t, &vd->vd_seg, 1, size, &va,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(t, vd->vd_map, va, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	vd->vd_desc = (struct vd_desc *)va;
	vd->vd_nentries = nentries;
	bzero(vd->vd_desc, nentries * sizeof(struct vd_desc));
	for (i = 0; i < vd->vd_nentries; i++)
		vd->vd_desc[i].hdr.dstate = VIO_DESC_FREE;
	return (vd);

unmap:
	bus_dmamem_unmap(t, va, size);
free:
	bus_dmamem_free(t, &vd->vd_seg, 1);
destroy:
	bus_dmamap_destroy(t, vd->vd_map);
error:
	free(vd, M_DEVBUF, sizeof(struct vdsk_dring));

	return (NULL);
}

void
vdsk_dring_free(bus_dma_tag_t t, struct vdsk_dring *vd)
{
	bus_size_t size;

	size = vd->vd_nentries * sizeof(struct vd_desc);
	size = roundup(size, PAGE_SIZE);

	bus_dmamap_unload(t, vd->vd_map);
	bus_dmamem_unmap(t, (caddr_t)vd->vd_desc, size);
	bus_dmamem_free(t, &vd->vd_seg, 1);
	bus_dmamap_destroy(t, vd->vd_map);
	free(vd, M_DEVBUF, 0);
}

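/*
 * I/O "pool" backend for the SCSI midlayer.  There is no real
 * allocation: an opaque non-NULL token is handed out as long as the
 * VIO session is established and ring descriptors remain, which
 * throttles command submission to the size of the ring.
 */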
void *
vdsk_io_get(void *xsc)
{
	struct vdsk_softc *sc = xsc;
	void *rv = sc; /* just has to be !NULL */
	int s;

	s = splbio();
	if (sc->sc_vio_state != VIO_ESTABLISHED ||
	    sc->sc_tx_cnt >= sc->sc_vd->vd_nentries)
		rv = NULL;
	else
		sc->sc_tx_cnt++;
	splx(s);

	return (rv);
}

void
vdsk_io_put(void *xsc, void *io)
{
	struct vdsk_softc *sc = xsc;
	int s;

#ifdef DIAGNOSTIC
	if (sc != io)
		panic("vdsk_io_put: unexpected io");
#endif

	s = splbio();
	sc->sc_tx_cnt--;
	splx(s);
}

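/*
 * SCSI command dispatch.  Reads, writes and cache flushes are
 * translated into vDisk ring descriptors; INQUIRY and READ CAPACITY
 * are emulated locally from the negotiated attribute info, and
 * anything else is completed with an error.
 */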
void
vdsk_scsi_cmd(struct scsi_xfer *xs)
{
	struct vdsk_softc *sc = xs->sc_link->adapter_softc;
	int timeout, s;
	int desc;

	switch (xs->cmd->opcode) {
	case READ_BIG:
	case READ_COMMAND:
	case READ_12:
	case READ_16:
	case WRITE_BIG:
	case WRITE_COMMAND:
	case WRITE_12:
	case WRITE_16:
	case SYNCHRONIZE_CACHE:
		break;

	case INQUIRY:
		vdsk_scsi_inq(xs);
		return;
	case READ_CAPACITY:
		vdsk_scsi_capacity(xs);
		return;
	case READ_CAPACITY_16:
		vdsk_scsi_capacity16(xs);
		return;

	case TEST_UNIT_READY:
	case START_STOP:
	case PREVENT_ALLOW:
		vdsk_scsi_done(xs, XS_NOERROR);
		return;

	default:
		printf("%s cmd 0x%02x\n", __func__, xs->cmd->opcode);
	case MODE_SENSE:
	case MODE_SENSE_BIG:
	case REPORT_LUNS:
	case READ_TOC:
		vdsk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	s = splbio();
	desc = vdsk_submit_cmd(xs);

	if (!ISSET(xs->flags, SCSI_POLL)) {
		splx(s);
		return;
	}

	timeout = 1000;
	do {
		if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE)
			break;

		delay(1000);
	} while (--timeout > 0);
	if (sc->sc_vd->vd_desc[desc].hdr.dstate == VIO_DESC_DONE) {
		vdsk_complete_cmd(xs, desc);
	} else {
		ldc_reset(&sc->sc_lc);
		vdsk_scsi_done(xs, XS_TIMEOUT);
	}
	splx(s);
}

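/*
 * Build and post a ring descriptor for xs.  The data buffer is
 * exported page by page through free slots in the LDC map table;
 * each page becomes one cookie in the descriptor.  vdsk_scsi_cmd()
 * has already screened the opcode, so the switch below leaves
 * `operation' initialized for every command that reaches this
 * point.  Returns the descriptor index so SCSI_POLL callers can
 * spin on its dstate.
 */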
int
vdsk_submit_cmd(struct scsi_xfer *xs)
{
	struct vdsk_softc *sc = xs->sc_link->adapter_softc;
	struct ldc_map *map = sc->sc_lm;
	struct vio_dring_msg dm;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	struct scsi_rw_12 *rw12;
	struct scsi_rw_16 *rw16;
	u_int64_t lba;
	u_int32_t sector_count;
	uint8_t operation;
	vaddr_t va;
	paddr_t pa;
	psize_t nbytes;
	int len, ncookies;
	int desc;

	switch (xs->cmd->opcode) {
	case READ_BIG:
	case READ_COMMAND:
	case READ_12:
	case READ_16:
		operation = VD_OP_BREAD;
		break;

	case WRITE_BIG:
	case WRITE_COMMAND:
	case WRITE_12:
	case WRITE_16:
		operation = VD_OP_BWRITE;
		break;

	case SYNCHRONIZE_CACHE:
		operation = VD_OP_FLUSH;
		break;
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE has same
	 * layout as 10-byte READ/WRITE commands.
	 */
	if (xs->cmdlen == 6) {
		rw = (struct scsi_rw *)xs->cmd;
		lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		sector_count = rw->length ? rw->length : 0x100;
	} else if (xs->cmdlen == 10) {
		rwb = (struct scsi_rw_big *)xs->cmd;
		lba = _4btol(rwb->addr);
		sector_count = _2btol(rwb->length);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsi_rw_12 *)xs->cmd;
		lba = _4btol(rw12->addr);
		sector_count = _4btol(rw12->length);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsi_rw_16 *)xs->cmd;
		lba = _8btol(rw16->addr);
		sector_count = _4btol(rw16->length);
	}

	desc = sc->sc_tx_prod;

	ncookies = 0;
	len = xs->datalen;
	va = (vaddr_t)xs->data;
	while (len > 0) {
		KASSERT(ncookies < MAXPHYS / PAGE_SIZE);
		pmap_extract(pmap_kernel(), va, &pa);
		while (map->lm_slot[map->lm_next].entry != 0) {
			map->lm_next++;
			map->lm_next &= (map->lm_nentries - 1);
		}
		map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
		map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR | LDC_MTE_CPW;
		map->lm_slot[map->lm_next].entry |= LDC_MTE_IOR | LDC_MTE_IOW;
		map->lm_slot[map->lm_next].entry |= LDC_MTE_R | LDC_MTE_W;
		map->lm_count++;

		nbytes = MIN(len, PAGE_SIZE - (pa & PAGE_MASK));

		sc->sc_vd->vd_desc[desc].cookie[ncookies].addr =
		    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
		sc->sc_vd->vd_desc[desc].cookie[ncookies].size = nbytes;

		sc->sc_vsd[desc].vsd_map_idx[ncookies] = map->lm_next;
		va += nbytes;
		len -= nbytes;
		ncookies++;
	}

	if (ISSET(xs->flags, SCSI_POLL) == 0)
		sc->sc_vd->vd_desc[desc].hdr.ack = 1;
	else
		sc->sc_vd->vd_desc[desc].hdr.ack = 0;
	sc->sc_vd->vd_desc[desc].operation = operation;
	sc->sc_vd->vd_desc[desc].slice = VD_SLICE_NONE;
	sc->sc_vd->vd_desc[desc].status = 0xffffffff;
	sc->sc_vd->vd_desc[desc].offset = lba;
	sc->sc_vd->vd_desc[desc].size = xs->datalen;
	sc->sc_vd->vd_desc[desc].ncookies = ncookies;
	membar(Sync);
	sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_READY;

	sc->sc_vsd[desc].vsd_xs = xs;
	sc->sc_vsd[desc].vsd_ncookies = ncookies;

	sc->sc_tx_prod++;
	sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1);

	bzero(&dm, sizeof(dm));
	dm.tag.type = VIO_TYPE_DATA;
	dm.tag.stype = VIO_SUBTYPE_INFO;
	dm.tag.stype_env = VIO_DRING_DATA;
	dm.tag.sid = sc->sc_local_sid;
	dm.seq_no = sc->sc_seq_no++;
	dm.dring_ident = sc->sc_dring_ident;
	dm.start_idx = dm.end_idx = desc;
	vdsk_sendmsg(sc, &dm, sizeof(dm));

	return desc;
}

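/*
 * Tear down the LDC map entries of a finished descriptor and
 * complete the SCSI transfer.  A non-zero status from the server is
 * reported as a generic driver error.
 */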
void
vdsk_complete_cmd(struct scsi_xfer *xs, int desc)
{
	struct vdsk_softc *sc = xs->sc_link->adapter_softc;
	struct ldc_map *map = sc->sc_lm;
	int cookie, idx;
	int error;

	cookie = 0;
	while (cookie < sc->sc_vsd[desc].vsd_ncookies) {
		idx = sc->sc_vsd[desc].vsd_map_idx[cookie++];
		map->lm_slot[idx].entry = 0;
		map->lm_count--;
	}

	error = XS_NOERROR;
	if (sc->sc_vd->vd_desc[desc].status != 0)
		error = XS_DRIVER_STUFFUP;
	xs->resid = xs->datalen -
	    sc->sc_vd->vd_desc[desc].size;
	vdsk_scsi_done(xs, error);

	sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_FREE;
}

void
vdsk_scsi_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry *inq = (struct scsi_inquiry *)xs->cmd;

	if (ISSET(inq->flags, SI_EVPD))
		vdsk_scsi_done(xs, XS_DRIVER_STUFFUP);
	else
		vdsk_scsi_inquiry(xs);
}

void
vdsk_scsi_inquiry(struct scsi_xfer *xs)
{
	struct vdsk_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_inquiry_data inq;
	char buf[5];

	bzero(&inq, sizeof(inq));

	switch (sc->sc_vd_mtype) {
	case VD_MEDIA_TYPE_CD:
	case VD_MEDIA_TYPE_DVD:
		inq.device = T_CDROM;
		break;

	case VD_MEDIA_TYPE_FIXED:
	default:
		inq.device = T_DIRECT;
		break;
	}

	inq.version = 0x05; /* SPC-3 */
	inq.response_format = 2;
	inq.additional_length = 32;
	inq.flags |= SID_CmdQue;
	bcopy("SUN     ", inq.vendor, sizeof(inq.vendor));
	bcopy("Virtual Disk    ", inq.product, sizeof(inq.product));
	snprintf(buf, sizeof(buf), "%u.%u ", sc->sc_major, sc->sc_minor);
	bcopy(buf, inq.revision, sizeof(inq.revision));

	bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_capacity(struct scsi_xfer *xs)
{
	struct vdsk_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_read_cap_data rcd;
	uint64_t capacity;

	bzero(&rcd, sizeof(rcd));

	capacity = sc->sc_vdisk_size - 1;
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(sc->sc_vdisk_block_size, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_capacity16(struct scsi_xfer *xs)
{
	struct vdsk_softc *sc = xs->sc_link->adapter_softc;
	struct scsi_read_cap_data_16 rcd;

	bzero(&rcd, sizeof(rcd));

	_lto8b(sc->sc_vdisk_size - 1, rcd.addr);
	_lto4b(sc->sc_vdisk_block_size, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	vdsk_scsi_done(xs, XS_NOERROR);
}

void
vdsk_scsi_done(struct scsi_xfer *xs, int error)
{
	xs->error = error;

	scsi_done(xs);
}

int
vdsk_dev_probe(struct scsi_link *link)
{
	KASSERT(link->lun == 0);

	if (link->target == 0)
		return (0);

	return (ENODEV);
}

void
vdsk_dev_free(struct scsi_link *link)
{
	printf("%s\n", __func__);
}