Lines Matching refs:vd

219 	    (vd->dring + (i)*vd->descriptor_size))
222 #define VD_CLIENT(vd) \ argument
223 (((vd)->xfer_mode == VIO_DESC_MODE) ? "in-band client" : \
224 (((vd)->xfer_mode == VIO_DRING_MODE_V1_0) ? "dring client" : \
225 (((vd)->xfer_mode == 0) ? "null client" : \
229 #define VD_DSKIMG_LABEL_READ(vd, labelp) \ argument
230 vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)labelp, \
234 #define VD_DSKIMG_LABEL_WRITE(vd, labelp) \ argument
235 vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, (caddr_t)labelp, \
239 #define VD_DSKIMG(vd) ((vd)->vdisk_type == VD_DISK_TYPE_DISK && \ argument
240 ((vd)->file || (vd)->volume))
243 #define VD_WRITE_INDEX_NEXT(vd, id) \ argument
244 ((((id) + 1) >= vd->dring_len)? 0 : (id) + 1)
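
The VD_WRITE_INDEX_NEXT macro just above (lines 243-244) advances the write-queue index around a ring of dring_len entries, wrapping to 0. A minimal stand-alone sketch of that wrap, with a made-up ring length in place of the driver's dring_len:

#include <stdio.h>

/* Stand-alone model of VD_WRITE_INDEX_NEXT: advance and wrap at ring_len. */
#define	WRITE_INDEX_NEXT(ring_len, id) \
	((((id) + 1) >= (ring_len)) ? 0 : (id) + 1)

int
main(void)
{
	int ring_len = 4;	/* illustrative; the driver uses dring_len */
	int id = 0;

	for (int n = 0; n < 6; n++) {
		int next = WRITE_INDEX_NEXT(ring_len, id);
		printf("%d -> %d\n", id, next);
		id = next;
	}
	return (0);
}
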
425 struct vd *vd; /* vd instance task is for */ member
441 typedef struct vd { struct
521 #define VD_LABEL_VTOC(vd) \ argument
522 ((struct dk_label *)(void *)((vd)->flabel))
524 #define VD_LABEL_EFI_GPT(vd, lba) \ argument
525 ((efi_gpt_t *)(void *)((vd)->flabel + (lba)))
526 #define VD_LABEL_EFI_GPE(vd, lba) \ argument
527 ((efi_gpe_t *)(void *)((vd)->flabel + 2 * (lba)))
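
VD_LABEL_VTOC and VD_LABEL_EFI_GPT/GPE (lines 521-527) overlay label structures on the flat flabel buffer; the EFI setup code later in this listing passes the block size as lba (lines 5827-5828), so the GPT header lands one block into the buffer and the GPE array two blocks in. A sketch of that pointer arithmetic using stand-in structures (not the real dk_label/efi_gpt_t/efi_gpe_t types) and an assumed 512-byte block size:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for efi_gpt_t/efi_gpe_t; only the placement matters here. */
typedef struct { unsigned long long signature; } fake_gpt_t;
typedef struct { unsigned long long start_lba, end_lba; } fake_gpe_t;

int
main(void)
{
	size_t bsize = 512;			/* assumed block size */
	char *flabel = calloc(3, bsize);	/* block 0, GPT, GPE array */

	if (flabel == NULL)
		return (1);

	/* Same pointer arithmetic as VD_LABEL_EFI_GPT/GPE above. */
	fake_gpt_t *gpt = (fake_gpt_t *)(void *)(flabel + bsize);
	fake_gpe_t *gpe = (fake_gpe_t *)(void *)(flabel + 2 * bsize);

	printf("gpt at byte offset %zu, gpe array at byte offset %zu\n",
	    (size_t)((char *)gpt - flabel), (size_t)((char *)gpe - flabel));

	free(flabel);
	return (0);
}
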
668 static int vd_setup_vd(vd_t *vd);
669 static int vd_setup_single_slice_disk(vd_t *vd);
670 static int vd_setup_slice_image(vd_t *vd);
671 static int vd_setup_disk_image(vd_t *vd);
672 static int vd_backend_check_size(vd_t *vd);
673 static boolean_t vd_enabled(vd_t *vd);
675 static int vd_dskimg_validate_geometry(vd_t *vd);
676 static boolean_t vd_dskimg_is_iso_image(vd_t *vd);
677 static void vd_set_exported_operations(vd_t *vd);
678 static void vd_reset_access(vd_t *vd);
679 static int vd_backend_ioctl(vd_t *vd, int cmd, caddr_t arg);
687 static boolean_t vd_slice_geom_isvalid(vd_t *vd, struct dk_geom *geom);
688 static boolean_t vd_slice_vtoc_isvalid(vd_t *vd, struct extvtoc *vtoc);
756 vd_dskimg_io_params(vd_t *vd, int slice, size_t *blkp, size_t *lenp) in vd_dskimg_io_params() argument
762 ASSERT(vd->file || VD_DSKIMG(vd)); in vd_dskimg_io_params()
764 ASSERT(vd->vdisk_bsize == DEV_BSIZE); in vd_dskimg_io_params()
772 if (vd->vdisk_type == VD_DISK_TYPE_SLICE || slice == VD_SLICE_NONE) { in vd_dskimg_io_params()
775 if (offset >= vd->dskimg_size) { in vd_dskimg_io_params()
778 offset, vd->dskimg_size); in vd_dskimg_io_params()
781 maxlen = vd->dskimg_size - offset; in vd_dskimg_io_params()
791 if (vd->vdisk_label == VD_DISK_LABEL_UNK && in vd_dskimg_io_params()
792 vio_ver_is_supported(vd->version, 1, 1)) { in vd_dskimg_io_params()
793 (void) vd_dskimg_validate_geometry(vd); in vd_dskimg_io_params()
794 if (vd->vdisk_label == VD_DISK_LABEL_UNK) { in vd_dskimg_io_params()
801 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) { in vd_dskimg_io_params()
802 ASSERT(vd->vtoc.v_sectorsz == DEV_BSIZE); in vd_dskimg_io_params()
804 ASSERT(vd->vdisk_label == VD_DISK_LABEL_EFI); in vd_dskimg_io_params()
807 if (blk >= vd->slices[slice].nblocks) { in vd_dskimg_io_params()
810 blk, vd->slices[slice].nblocks); in vd_dskimg_io_params()
814 offset = (vd->slices[slice].start + blk) * DEV_BSIZE; in vd_dskimg_io_params()
815 maxlen = (vd->slices[slice].nblocks - blk) * DEV_BSIZE; in vd_dskimg_io_params()
834 if ((offset + len) > vd->dskimg_size) { in vd_dskimg_io_params()
836 "dskimg_size (0x%lx)", offset, len, vd->dskimg_size); in vd_dskimg_io_params()
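
vd_dskimg_io_params (lines 756-836) translates a request against a disk image into a byte offset and an upper bound on its length. A stand-alone sketch of the slice branch (lines 807-815), with made-up slice geometry and the 512-byte DEV_BSIZE that the ASSERT at line 764 requires; the clamp of len to maxlen at the end is this sketch's own step, showing what maxlen bounds:

#include <stdio.h>

#define	DEV_BSIZE	512	/* the block size the ASSERT above requires */

int
main(void)
{
	/* Made-up slice geometry and request. */
	size_t slice_start = 1024, slice_nblocks = 4096;
	size_t blk = 4000, len = 128 * DEV_BSIZE;

	if (blk >= slice_nblocks) {
		printf("block %zu is past the %zu blocks of the slice\n",
		    blk, slice_nblocks);
		return (1);
	}

	/* Same arithmetic as lines 814-815. */
	size_t offset = (slice_start + blk) * DEV_BSIZE;
	size_t maxlen = (slice_nblocks - blk) * DEV_BSIZE;

	/* Clamp (this sketch's own step) to show what maxlen bounds. */
	if (len > maxlen)
		len = maxlen;

	printf("offset=%zu len=%zu maxlen=%zu\n", offset, len, maxlen);
	return (0);
}
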
873 vd_dskimg_rw(vd_t *vd, int slice, int operation, caddr_t data, size_t offset, in vd_dskimg_rw() argument
880 ASSERT(vd->file || VD_DSKIMG(vd)); in vd_dskimg_rw()
882 ASSERT(vd->vdisk_bsize == DEV_BSIZE); in vd_dskimg_rw()
884 if ((status = vd_dskimg_io_params(vd, slice, &offset, &len)) != 0) in vd_dskimg_rw()
887 if (vd->volume) { in vd_dskimg_rw()
894 buf.b_edev = vd->dev[0]; in vd_dskimg_rw()
907 if (ldi_strategy(vd->ldi_handle[0], &buf) != 0) { in vd_dskimg_rw()
924 ASSERT(vd->file); in vd_dskimg_rw()
927 vd->file_vnode, data, len, offset * DEV_BSIZE, UIO_SYSSPACE, FSYNC, in vd_dskimg_rw()
1081 vd_dskimg_set_vtoc(vd_t *vd, struct dk_label *label) in vd_dskimg_set_vtoc() argument
1085 ASSERT(VD_DSKIMG(vd)); in vd_dskimg_set_vtoc()
1087 if (VD_DSKIMG_LABEL_WRITE(vd, label) < 0) { in vd_dskimg_set_vtoc()
1120 if (vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, in vd_dskimg_set_vtoc()
1151 vd_dskimg_get_devid_block(vd_t *vd, size_t *blkp) in vd_dskimg_get_devid_block() argument
1155 ASSERT(VD_DSKIMG(vd)); in vd_dskimg_get_devid_block()
1157 if (vd->vdisk_label == VD_DISK_LABEL_UNK) { in vd_dskimg_get_devid_block()
1165 if (vd->vdisk_label == VD_DISK_LABEL_EFI) { in vd_dskimg_get_devid_block()
1170 if (vd->efi_reserved == -1) { in vd_dskimg_get_devid_block()
1175 *blkp = vd->slices[vd->efi_reserved].start; in vd_dskimg_get_devid_block()
1179 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC); in vd_dskimg_get_devid_block()
1182 if (vd->dk_geom.dkg_acyl < 2) { in vd_dskimg_get_devid_block()
1184 "(acyl=%u)", vd->dk_geom.dkg_acyl); in vd_dskimg_get_devid_block()
1189 cyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl - 2; in vd_dskimg_get_devid_block()
1190 spc = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect; in vd_dskimg_get_devid_block()
1191 head = vd->dk_geom.dkg_nhead - 1; in vd_dskimg_get_devid_block()
1193 *blkp = (cyl * (spc - vd->dk_geom.dkg_apc)) + in vd_dskimg_get_devid_block()
1194 (head * vd->dk_geom.dkg_nsect) + 1; in vd_dskimg_get_devid_block()
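
For a VTOC-labeled image, vd_dskimg_get_devid_block (lines 1151-1194) derives the devid block purely from the geometry: the second-to-last cylinder (dkg_ncyl + dkg_acyl - 2), the last head, and the second sector of that track. A worked example of the formula at lines 1193-1194, with illustrative geometry values:

#include <stdio.h>

int
main(void)
{
	/* Illustrative geometry, not taken from any real disk. */
	unsigned ncyl = 1022, acyl = 2, nhead = 16, nsect = 63, apc = 0;

	size_t cyl  = ncyl + acyl - 2;	/* second-to-last cylinder */
	size_t spc  = nhead * nsect;	/* sectors per cylinder */
	size_t head = nhead - 1;	/* last head */

	/* Same formula as lines 1193-1194. */
	size_t blk = (cyl * (spc - apc)) + (head * nsect) + 1;

	printf("devid block = %zu\n", blk);
	return (0);
}
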
1234 vd_dskimg_read_devid(vd_t *vd, ddi_devid_t *devid) in vd_dskimg_read_devid() argument
1241 ASSERT(vd->vdisk_bsize == DEV_BSIZE); in vd_dskimg_read_devid()
1243 if ((status = vd_dskimg_get_devid_block(vd, &blk)) != 0) in vd_dskimg_read_devid()
1249 if ((vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)dkdevid, blk, in vd_dskimg_read_devid()
1310 vd_dskimg_write_devid(vd_t *vd, ddi_devid_t devid) in vd_dskimg_write_devid() argument
1317 ASSERT(vd->vdisk_bsize == DEV_BSIZE); in vd_dskimg_write_devid()
1324 if ((status = vd_dskimg_get_devid_block(vd, &blk)) != 0) in vd_dskimg_write_devid()
1343 if ((status = vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, in vd_dskimg_write_devid()
1376 vd_do_scsi_rdwr(vd_t *vd, int operation, caddr_t data, size_t blk, size_t len) in vd_do_scsi_rdwr() argument
1384 ASSERT(!vd->file); in vd_do_scsi_rdwr()
1385 ASSERT(!vd->volume); in vd_do_scsi_rdwr()
1386 ASSERT(vd->vdisk_bsize > 0); in vd_do_scsi_rdwr()
1388 max_sectors = vd->max_xfer_sz; in vd_do_scsi_rdwr()
1389 nblk = (len / vd->vdisk_bsize); in vd_do_scsi_rdwr()
1391 if (len % vd->vdisk_bsize != 0) in vd_do_scsi_rdwr()
1412 if (blk < (2 << 20) && nsectors <= 0xff && !vd->is_atapi_dev) { in vd_do_scsi_rdwr()
1429 ucmd.uscsi_buflen = nsectors * vd->backend_bsize; in vd_do_scsi_rdwr()
1444 status = ldi_ioctl(vd->ldi_handle[VD_ENTIRE_DISK_SLICE], in vd_do_scsi_rdwr()
1445 USCSICMD, (intptr_t)&ucmd, (vd->open_flags | FKIOCTL), in vd_do_scsi_rdwr()
1474 data += nsectors * vd->vdisk_bsize; in vd_do_scsi_rdwr()
1503 vd_scsi_rdwr(vd_t *vd, int operation, caddr_t data, size_t vblk, size_t vlen) in vd_scsi_rdwr() argument
1513 if (vd->backend_bsize == 0) { in vd_scsi_rdwr()
1518 if (vd_backend_check_size(vd) != 0) in vd_scsi_rdwr()
1529 if (vd->vdisk_bsize == vd->backend_bsize) in vd_scsi_rdwr()
1530 return (vd_do_scsi_rdwr(vd, operation, data, vblk, vlen)); in vd_scsi_rdwr()
1532 if (vd->vdisk_bsize > vd->backend_bsize) in vd_scsi_rdwr()
1568 pblk = (vblk * vd->vdisk_bsize) / vd->backend_bsize; in vd_scsi_rdwr()
1569 delta = (vblk * vd->vdisk_bsize) - (pblk * vd->backend_bsize); in vd_scsi_rdwr()
1570 pnblk = ((delta + vlen - 1) / vd->backend_bsize) + 1; in vd_scsi_rdwr()
1571 plen = pnblk * vd->backend_bsize; in vd_scsi_rdwr()
1576 rv = vd_do_scsi_rdwr(vd, operation, (caddr_t)buf, pblk, plen); in vd_scsi_rdwr()
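
When the virtual block size differs from the backend block size, vd_scsi_rdwr widens the request to whole backend blocks (lines 1568-1571) before issuing the transfer at line 1576; delta is the byte offset of the requested data inside that widened transfer. A stand-alone run of the same arithmetic with assumed sizes (512-byte virtual blocks over a 4096-byte backend):

#include <stdio.h>

int
main(void)
{
	size_t vdisk_bsize = 512, backend_bsize = 4096;	/* assumed sizes */
	size_t vblk = 10, vlen = 1536;			/* 3 virtual blocks */

	/* Same arithmetic as lines 1568-1571. */
	size_t pblk  = (vblk * vdisk_bsize) / backend_bsize;
	size_t delta = (vblk * vdisk_bsize) - (pblk * backend_bsize);
	size_t pnblk = ((delta + vlen - 1) / backend_bsize) + 1;
	size_t plen  = pnblk * backend_bsize;

	printf("pblk=%zu delta=%zu pnblk=%zu plen=%zu\n",
	    pblk, delta, pnblk, plen);
	return (0);
}
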
1603 vd_slice_flabel_read(vd_t *vd, caddr_t data, size_t offset, size_t length) in vd_slice_flabel_read() argument
1606 uint_t limit = vd->flabel_limit * vd->vdisk_bsize; in vd_slice_flabel_read()
1608 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); in vd_slice_flabel_read()
1609 ASSERT(vd->flabel != NULL); in vd_slice_flabel_read()
1616 if (offset < vd->flabel_size) { in vd_slice_flabel_read()
1618 if (offset + length <= vd->flabel_size) { in vd_slice_flabel_read()
1619 bcopy(vd->flabel + offset, data, length); in vd_slice_flabel_read()
1623 n = vd->flabel_size - offset; in vd_slice_flabel_read()
1624 bcopy(vd->flabel + offset, data, n); in vd_slice_flabel_read()
1659 vd_slice_flabel_write(vd_t *vd, caddr_t data, size_t offset, size_t length) in vd_slice_flabel_write() argument
1661 uint_t limit = vd->flabel_limit * vd->vdisk_bsize; in vd_slice_flabel_write()
1666 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); in vd_slice_flabel_write()
1667 ASSERT(vd->flabel != NULL); in vd_slice_flabel_write()
1677 if (vd->vdisk_label == VD_DISK_LABEL_VTOC && in vd_slice_flabel_write()
1678 offset == 0 && length == vd->vdisk_bsize) { in vd_slice_flabel_write()
1688 if (vd_slice_geom_isvalid(vd, &geom) && in vd_slice_flabel_write()
1689 vd_slice_vtoc_isvalid(vd, &vtoc)) in vd_slice_flabel_write()
1745 vd_slice_fake_rdwr(vd_t *vd, int slice, int operation, caddr_t *datap, in vd_slice_fake_rdwr() argument
1754 size_t bsize = vd->vdisk_bsize; in vd_slice_fake_rdwr()
1756 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); in vd_slice_fake_rdwr()
1769 vd->vdisk_label != VD_DISK_LABEL_VTOC) && in vd_slice_fake_rdwr()
1771 vd->vdisk_label != VD_DISK_LABEL_EFI)) { in vd_slice_fake_rdwr()
1780 n = vd_slice_flabel_write(vd, data, blk * bsize, length); in vd_slice_fake_rdwr()
1782 n = vd_slice_flabel_read(vd, data, blk * bsize, length); in vd_slice_fake_rdwr()
1800 if (vd->vdisk_label == VD_DISK_LABEL_VTOC && in vd_slice_fake_rdwr()
1806 if (vd->vdisk_label == VD_DISK_LABEL_EFI) { in vd_slice_fake_rdwr()
1808 ablk = vd->vdisk_size - asize; in vd_slice_fake_rdwr()
1810 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC); in vd_slice_fake_rdwr()
1811 ASSERT(vd->dk_geom.dkg_apc == 0); in vd_slice_fake_rdwr()
1813 csize = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect; in vd_slice_fake_rdwr()
1814 ablk = vd->dk_geom.dkg_ncyl * csize; in vd_slice_fake_rdwr()
1815 asize = vd->dk_geom.dkg_acyl * csize; in vd_slice_fake_rdwr()
1855 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) { in vd_slice_fake_rdwr()
1857 label = VD_LABEL_VTOC(vd); in vd_slice_fake_rdwr()
1878 ASSERT(length == 0 || blk >= vd->flabel_limit); in vd_slice_fake_rdwr()
1885 *blkp = blk - vd->flabel_limit; in vd_slice_fake_rdwr()
1892 vd_flush_write(vd_t *vd) in vd_flush_write() argument
1896 if (vd->file) { in vd_flush_write()
1897 status = VOP_FSYNC(vd->file_vnode, FSYNC, kcred, NULL); in vd_flush_write()
1899 status = ldi_ioctl(vd->ldi_handle[0], DKIOCFLUSHWRITECACHE, in vd_flush_write()
1900 (intptr_t)NULL, vd->open_flags | FKIOCTL, kcred, &rval); in vd_flush_write()
1911 vd_t *vd = task->vd; in vd_bio_task() local
1915 ASSERT(vd->vdisk_bsize == DEV_BSIZE); in vd_bio_task()
1917 if (vd->zvol) { in vd_bio_task()
1919 status = ldi_strategy(vd->ldi_handle[0], buf); in vd_bio_task()
1923 ASSERT(vd->file); in vd_bio_task()
1926 vd->file_vnode, buf->b_un.b_addr, buf->b_bcount, in vd_bio_task()
1972 vd_t *vd = task->vd; in vd_start_bio() local
1981 ASSERT(vd != NULL); in vd_start_bio()
1986 ASSERT(slice == VD_SLICE_NONE || slice < vd->nslices); in vd_start_bio()
2004 if (request->operation == VD_OP_BWRITE && !(vd->open_flags & FWRITE)) { in vd_start_bio()
2042 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) { in vd_start_bio()
2046 rv = vd_slice_fake_rdwr(vd, slice, request->operation, in vd_start_bio()
2080 } else if (vd->volume || vd->file) { in vd_start_bio()
2082 rv = vd_dskimg_io_params(vd, slice, &offset, &length); in vd_start_bio()
2102 rv = vd_scsi_rdwr(vd, request->operation, bufaddr, offset, in vd_start_bio()
2119 buf->b_edev = vd->dev[slice]; in vd_start_bio()
2123 if (vd->file || vd->zvol) { in vd_start_bio()
2158 task->write_index = vd->write_index; in vd_start_bio()
2159 vd->write_queue[task->write_index] = buf; in vd_start_bio()
2160 vd->write_index = in vd_start_bio()
2161 VD_WRITE_INDEX_NEXT(vd, vd->write_index); in vd_start_bio()
2166 ASSERT(vd->ioq != NULL); in vd_start_bio()
2169 (void) ddi_taskq_dispatch(task->vd->ioq, vd_bio_task, buf, in vd_start_bio()
2181 buf->b_lblkno = offset << vd->vio_bshift; in vd_start_bio()
2183 request->status = ldi_strategy(vd->ldi_handle[slice], buf); in vd_start_bio()
2250 vd_need_reset(vd_t *vd, boolean_t reset_ldc) in vd_need_reset() argument
2252 mutex_enter(&vd->lock); in vd_need_reset()
2253 vd->reset_state = B_TRUE; in vd_need_reset()
2254 vd->reset_ldc = reset_ldc; in vd_need_reset()
2255 mutex_exit(&vd->lock); in vd_need_reset()
2264 vd_reset_if_needed(vd_t *vd) in vd_reset_if_needed() argument
2268 mutex_enter(&vd->lock); in vd_reset_if_needed()
2269 if (!vd->reset_state) { in vd_reset_if_needed()
2270 ASSERT(!vd->reset_ldc); in vd_reset_if_needed()
2271 mutex_exit(&vd->lock); in vd_reset_if_needed()
2274 mutex_exit(&vd->lock); in vd_reset_if_needed()
2276 PR0("Resetting connection state with %s", VD_CLIENT(vd)); in vd_reset_if_needed()
2283 if (vd->ioq != NULL) in vd_reset_if_needed()
2284 ddi_taskq_wait(vd->ioq); in vd_reset_if_needed()
2285 ddi_taskq_wait(vd->completionq); in vd_reset_if_needed()
2287 status = vd_flush_write(vd); in vd_reset_if_needed()
2292 if ((vd->initialized & VD_DRING) && in vd_reset_if_needed()
2293 ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0)) in vd_reset_if_needed()
2296 vd_free_dring_task(vd); in vd_reset_if_needed()
2299 if (vd->vio_msgp != NULL) { in vd_reset_if_needed()
2300 kmem_free(vd->vio_msgp, vd->max_msglen); in vd_reset_if_needed()
2301 vd->vio_msgp = NULL; in vd_reset_if_needed()
2305 if (vd->inband_task.msg != NULL) { in vd_reset_if_needed()
2306 kmem_free(vd->inband_task.msg, vd->max_msglen); in vd_reset_if_needed()
2307 vd->inband_task.msg = NULL; in vd_reset_if_needed()
2310 mutex_enter(&vd->lock); in vd_reset_if_needed()
2312 if (vd->reset_ldc) in vd_reset_if_needed()
2314 if (vd->reset_ldc && ((status = ldc_down(vd->ldc_handle)) != 0)) in vd_reset_if_needed()
2318 vd_reset_access(vd); in vd_reset_if_needed()
2320 vd->initialized &= ~(VD_SID | VD_SEQ_NUM | VD_DRING); in vd_reset_if_needed()
2321 vd->state = VD_STATE_INIT; in vd_reset_if_needed()
2322 vd->max_msglen = sizeof (vio_msg_t); /* baseline vio message size */ in vd_reset_if_needed()
2325 vd->vio_msgp = kmem_alloc(vd->max_msglen, KM_SLEEP); in vd_reset_if_needed()
2328 (void) ldc_up(vd->ldc_handle); in vd_reset_if_needed()
2330 vd->reset_state = B_FALSE; in vd_reset_if_needed()
2331 vd->reset_ldc = B_FALSE; in vd_reset_if_needed()
2333 mutex_exit(&vd->lock); in vd_reset_if_needed()
2339 vd_mark_in_reset(vd_t *vd) in vd_mark_in_reset() argument
2345 vd_need_reset(vd, B_FALSE); in vd_mark_in_reset()
2346 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, DDI_SLEEP); in vd_mark_in_reset()
2349 vd_need_reset(vd, B_TRUE); in vd_mark_in_reset()
2355 vd_mark_elem_done(vd_t *vd, int idx, int elem_status, int elem_nbytes) in vd_mark_elem_done() argument
2362 if (vd->reset_state) in vd_mark_elem_done()
2366 if ((status = VIO_DRING_ACQUIRE(&otd, vd->dring_mtype, in vd_mark_elem_done()
2367 vd->dring_handle, idx, idx)) != 0) { in vd_mark_elem_done()
2369 vd_mark_in_reset(vd); in vd_mark_elem_done()
2388 if ((status = VIO_DRING_RELEASE(vd->dring_mtype, in vd_mark_elem_done()
2389 vd->dring_handle, idx, idx)) != 0) { in vd_mark_elem_done()
2391 vd_mark_in_reset(vd); in vd_mark_elem_done()
2416 vd_t *vd = task->vd; in vd_complete_bio() local
2422 ASSERT(vd != NULL); in vd_complete_bio()
2458 if (vd->write_queue[wid] != NULL) { in vd_complete_bio()
2460 vd->write_queue[wid] = NULL; in vd_complete_bio()
2461 wid = VD_WRITE_INDEX_NEXT(vd, wid); in vd_complete_bio()
2470 while (vd->write_queue[wid] != NULL) { in vd_complete_bio()
2471 (void) biowait(vd->write_queue[wid]); in vd_complete_bio()
2472 vd->write_queue[wid] = NULL; in vd_complete_bio()
2473 wid = VD_WRITE_INDEX_NEXT(vd, wid); in vd_complete_bio()
2481 request->status = vd_flush_write(vd); in vd_complete_bio()
2484 (void (*)(void *))vd_flush_write, vd, in vd_complete_bio()
2495 if (!vd->reset_state) in vd_complete_bio()
2501 vd_mark_in_reset(vd); in vd_complete_bio()
2512 vd_mark_in_reset(vd); in vd_complete_bio()
2540 ASSERT(task->vd != NULL); in vd_notify()
2550 status = send_msg(task->vd->ldc_handle, task->msg, task->msglen); in vd_notify()
2555 vd_mark_in_reset(task->vd); in vd_notify()
2559 vd_need_reset(task->vd, B_TRUE); in vd_notify()
2581 vd_t *vd = task->vd; in vd_complete_notify() local
2585 if (!vd->reset_state && (vd->xfer_mode == VIO_DRING_MODE_V1_0)) { in vd_complete_notify()
2586 status = vd_mark_elem_done(vd, task->index, in vd_complete_notify()
2589 vd_mark_in_reset(vd); in vd_complete_notify()
2591 vd_need_reset(vd, B_TRUE); in vd_complete_notify()
2616 if (!vd->reset_state) in vd_complete_notify()
2964 vd_slice_geom_isvalid(vd_t *vd, struct dk_geom *geom) in vd_slice_geom_isvalid() argument
2966 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); in vd_slice_geom_isvalid()
2967 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC); in vd_slice_geom_isvalid()
2969 if (geom->dkg_ncyl != vd->dk_geom.dkg_ncyl || in vd_slice_geom_isvalid()
2970 geom->dkg_acyl != vd->dk_geom.dkg_acyl || in vd_slice_geom_isvalid()
2971 geom->dkg_nsect != vd->dk_geom.dkg_nsect || in vd_slice_geom_isvalid()
2972 geom->dkg_pcyl != vd->dk_geom.dkg_pcyl) in vd_slice_geom_isvalid()
2984 vd_slice_vtoc_isvalid(vd_t *vd, struct extvtoc *vtoc) in vd_slice_vtoc_isvalid() argument
2989 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); in vd_slice_vtoc_isvalid()
2990 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC); in vd_slice_vtoc_isvalid()
2992 if (vtoc->v_sanity != vd->vtoc.v_sanity || in vd_slice_vtoc_isvalid()
2993 vtoc->v_version != vd->vtoc.v_version || in vd_slice_vtoc_isvalid()
2994 vtoc->v_nparts != vd->vtoc.v_nparts || in vd_slice_vtoc_isvalid()
2995 strcmp(vtoc->v_volume, vd->vtoc.v_volume) != 0 || in vd_slice_vtoc_isvalid()
2996 strcmp(vtoc->v_asciilabel, vd->vtoc.v_asciilabel) != 0) in vd_slice_vtoc_isvalid()
3001 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_start || in vd_slice_vtoc_isvalid()
3003 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_size) in vd_slice_vtoc_isvalid()
3015 csize = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect; in vd_slice_vtoc_isvalid()
3021 if (vd->vtoc.v_part[0].p_size >= 4 * csize && in vd_slice_vtoc_isvalid()
3022 vtoc->v_part[0].p_size < vd->vtoc.v_part[0].p_size - 4 *csize) in vd_slice_vtoc_isvalid()
3045 vd_do_slice_ioctl(vd_t *vd, int cmd, void *ioctl_arg) in vd_do_slice_ioctl() argument
3052 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); in vd_do_slice_ioctl()
3055 return (vd_flush_write(vd)); in vd_do_slice_ioctl()
3057 switch (vd->vdisk_label) { in vd_do_slice_ioctl()
3066 bcopy(&vd->dk_geom, ioctl_arg, sizeof (vd->dk_geom)); in vd_do_slice_ioctl()
3071 bcopy(&vd->vtoc, ioctl_arg, sizeof (vd->vtoc)); in vd_do_slice_ioctl()
3081 if (!vd_slice_geom_isvalid(vd, geom)) in vd_do_slice_ioctl()
3093 if (!vd_slice_vtoc_isvalid(vd, vtoc)) in vd_do_slice_ioctl()
3121 len = vd_slice_flabel_read(vd, in vd_do_slice_ioctl()
3123 lba * vd->vdisk_bsize, len); in vd_do_slice_ioctl()
3145 vds_efi_alloc_and_read(vd_t *vd, efi_gpt_t **gpt, efi_gpe_t **gpe) in vds_efi_alloc_and_read() argument
3150 VD_EFI_DEV_SET(edev, vd, (vd_efi_ioctl_func)vd_backend_ioctl); in vds_efi_alloc_and_read()
3158 vds_efi_free(vd_t *vd, efi_gpt_t *gpt, efi_gpe_t *gpe) in vds_efi_free() argument
3162 VD_EFI_DEV_SET(edev, vd, (vd_efi_ioctl_func)vd_backend_ioctl); in vds_efi_free()
3168 vd_dskimg_validate_efi(vd_t *vd) in vd_dskimg_validate_efi() argument
3175 if ((status = vds_efi_alloc_and_read(vd, &gpt, &gpe)) != 0) in vd_dskimg_validate_efi()
3178 bzero(&vd->vtoc, sizeof (struct extvtoc)); in vd_dskimg_validate_efi()
3179 bzero(&vd->dk_geom, sizeof (struct dk_geom)); in vd_dskimg_validate_efi()
3180 bzero(vd->slices, sizeof (vd_slice_t) * VD_MAXPART); in vd_dskimg_validate_efi()
3182 vd->efi_reserved = -1; in vd_dskimg_validate_efi()
3193 vd->slices[i].start = gpe[i].efi_gpe_StartingLBA; in vd_dskimg_validate_efi()
3194 vd->slices[i].nblocks = gpe[i].efi_gpe_EndingLBA - in vd_dskimg_validate_efi()
3199 vd->efi_reserved = i; in vd_dskimg_validate_efi()
3203 ASSERT(vd->vdisk_size != 0); in vd_dskimg_validate_efi()
3204 vd->slices[VD_EFI_WD_SLICE].start = 0; in vd_dskimg_validate_efi()
3205 vd->slices[VD_EFI_WD_SLICE].nblocks = vd->vdisk_size; in vd_dskimg_validate_efi()
3207 vds_efi_free(vd, gpt, gpe); in vd_dskimg_validate_efi()
3235 vd_dskimg_validate_geometry(vd_t *vd) in vd_dskimg_validate_geometry() argument
3238 struct dk_geom *geom = &vd->dk_geom; in vd_dskimg_validate_geometry()
3239 struct extvtoc *vtoc = &vd->vtoc; in vd_dskimg_validate_geometry()
3243 ASSERT(VD_DSKIMG(vd)); in vd_dskimg_validate_geometry()
3245 if (VD_DSKIMG_LABEL_READ(vd, &label) < 0) in vd_dskimg_validate_geometry()
3254 if (vd_dskimg_validate_efi(vd) == 0) { in vd_dskimg_validate_geometry()
3255 vd->vdisk_label = VD_DISK_LABEL_EFI; in vd_dskimg_validate_geometry()
3259 vd->vdisk_label = VD_DISK_LABEL_UNK; in vd_dskimg_validate_geometry()
3260 vd_build_default_label(vd->dskimg_size, vd->vdisk_bsize, in vd_dskimg_validate_geometry()
3264 vd->vdisk_label = VD_DISK_LABEL_VTOC; in vd_dskimg_validate_geometry()
3271 bzero(vd->slices, sizeof (vd_slice_t) * VD_MAXPART); in vd_dskimg_validate_geometry()
3272 if (vd->vdisk_label != VD_DISK_LABEL_UNK) { in vd_dskimg_validate_geometry()
3274 vd->slices[i].start = vtoc->v_part[i].p_start; in vd_dskimg_validate_geometry()
3275 vd->slices[i].nblocks = vtoc->v_part[i].p_size; in vd_dskimg_validate_geometry()
3290 vd_do_dskimg_ioctl(vd_t *vd, int cmd, void *ioctl_arg) in vd_do_dskimg_ioctl() argument
3298 ASSERT(VD_DSKIMG(vd)); in vd_do_dskimg_ioctl()
3306 rc = vd_dskimg_validate_geometry(vd); in vd_do_dskimg_ioctl()
3309 bcopy(&vd->dk_geom, geom, sizeof (struct dk_geom)); in vd_do_dskimg_ioctl()
3316 rc = vd_dskimg_validate_geometry(vd); in vd_do_dskimg_ioctl()
3319 bcopy(&vd->vtoc, vtoc, sizeof (struct extvtoc)); in vd_do_dskimg_ioctl()
3335 bcopy(ioctl_arg, &vd->dk_geom, sizeof (vd->dk_geom)); in vd_do_dskimg_ioctl()
3340 ASSERT(vd->dk_geom.dkg_nhead != 0 && in vd_do_dskimg_ioctl()
3341 vd->dk_geom.dkg_nsect != 0); in vd_do_dskimg_ioctl()
3349 vd_vtocgeom_to_label(vtoc, &vd->dk_geom, &label); in vd_do_dskimg_ioctl()
3352 if ((rc = vd_dskimg_set_vtoc(vd, &label)) != 0) in vd_do_dskimg_ioctl()
3358 return (vd_flush_write(vd)); in vd_do_dskimg_ioctl()
3364 if (vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, in vd_do_dskimg_ioctl()
3374 if (vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, in vd_do_dskimg_ioctl()
3388 (void) vd_dskimg_validate_geometry(vd); in vd_do_dskimg_ioctl()
3395 if (vd_dskimg_write_devid(vd, vd->dskimg_devid) != 0) { in vd_do_dskimg_ioctl()
3403 vd_backend_ioctl(vd_t *vd, int cmd, caddr_t arg) in vd_backend_ioctl() argument
3412 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) { in vd_backend_ioctl()
3415 status = vd_do_slice_ioctl(vd, cmd, arg); in vd_backend_ioctl()
3417 } else if (VD_DSKIMG(vd)) { in vd_backend_ioctl()
3420 status = vd_do_dskimg_ioctl(vd, cmd, arg); in vd_backend_ioctl()
3425 status = ldi_ioctl(vd->ldi_handle[0], cmd, (intptr_t)arg, in vd_backend_ioctl()
3426 vd->open_flags | FKIOCTL, kcred, &rval); in vd_backend_ioctl()
3439 status = ldi_ioctl(vd->ldi_handle[0], cmd, in vd_backend_ioctl()
3440 (intptr_t)&vtoc, vd->open_flags | FKIOCTL, in vd_backend_ioctl()
3450 status = ldi_ioctl(vd->ldi_handle[0], cmd, in vd_backend_ioctl()
3451 (intptr_t)&vtoc, vd->open_flags | FKIOCTL, in vd_backend_ioctl()
3482 vd_do_ioctl(vd_t *vd, vd_dring_payload_t *request, void* buf, vd_ioctl_t *ioctl) in vd_do_ioctl() argument
3488 ASSERT(request->slice < vd->nslices); in vd_do_ioctl()
3495 if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes, in vd_do_ioctl()
3522 if (!(vd->open_flags & FWRITE) && in vd_do_ioctl()
3533 request->status = vd_backend_ioctl(vd, ioctl->cmd, ioctl->arg); in vd_do_ioctl()
3561 if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes, in vd_do_ioctl()
3613 vd_t *vd = task->vd; in vd_ioctl() local
3658 ASSERT(vd != NULL); in vd_ioctl()
3660 ASSERT(request->slice < vd->nslices); in vd_ioctl()
3695 if (!(vd->open_flags & FWRITE) && ioctl[i].write) { in vd_ioctl()
3704 status = vd_do_ioctl(vd, request, buf, &ioctl[i]); in vd_ioctl()
3714 vd_t *vd = task->vd; in vd_get_devid() local
3723 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) { in vd_get_devid()
3736 if (VD_DSKIMG(vd)) { in vd_get_devid()
3737 if (vd->dskimg_devid == NULL) { in vd_get_devid()
3742 sz = ddi_devid_sizeof(vd->dskimg_devid); in vd_get_devid()
3744 bcopy(vd->dskimg_devid, devid, sz); in vd_get_devid()
3747 if (ddi_lyr_get_devid(vd->dev[request->slice], in vd_get_devid()
3778 if ((status = ldc_mem_copy(vd->ldc_handle, (caddr_t)vd_devid, 0, in vd_get_devid()
3793 vd_scsi_reset(vd_t *vd) in vd_scsi_reset() argument
3801 status = ldi_ioctl(vd->ldi_handle[0], USCSICMD, (intptr_t)&uscsi, in vd_scsi_reset()
3802 (vd->open_flags | FKIOCTL), kcred, &rval); in vd_scsi_reset()
3810 vd_t *vd = task->vd; in vd_reset() local
3814 ASSERT(vd->scsi); in vd_reset()
3824 request->status = vd_scsi_reset(vd); in vd_reset()
3834 vd_t *vd = task->vd; in vd_get_capacity() local
3855 (void) vd_backend_check_size(vd); in vd_get_capacity()
3856 ASSERT(vd->vdisk_size != 0); in vd_get_capacity()
3860 vd_cap.vdisk_block_size = vd->vdisk_bsize; in vd_get_capacity()
3861 vd_cap.vdisk_size = vd->vdisk_size; in vd_get_capacity()
3863 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&vd_cap, 0, &nbytes, in vd_get_capacity()
3878 vd_t *vd = task->vd; in vd_get_access() local
3882 ASSERT(vd->scsi); in vd_get_access()
3894 request->status = ldi_ioctl(vd->ldi_handle[request->slice], MHIOCSTATUS, in vd_get_access()
3895 (intptr_t)NULL, (vd->open_flags | FKIOCTL), kcred, &rval); in vd_get_access()
3902 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&access, 0, &nbytes, in vd_get_access()
3917 vd_t *vd = task->vd; in vd_set_access() local
3921 ASSERT(vd->scsi); in vd_set_access()
3931 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&flags, 0, &nbytes, in vd_set_access()
3939 request->status = ldi_ioctl(vd->ldi_handle[request->slice], in vd_set_access()
3940 MHIOCRELEASE, (intptr_t)NULL, (vd->open_flags | FKIOCTL), in vd_set_access()
3943 vd->ownership = B_FALSE; in vd_set_access()
3966 request->status = ldi_ioctl(vd->ldi_handle[request->slice], in vd_set_access()
3967 MHIOCTKOWN, (intptr_t)NULL, (vd->open_flags | FKIOCTL), in vd_set_access()
3982 request->status = ldi_ioctl(vd->ldi_handle[request->slice], in vd_set_access()
3983 MHIOCQRESERVE, (intptr_t)NULL, (vd->open_flags | FKIOCTL), in vd_set_access()
3987 request->status = ldi_ioctl(vd->ldi_handle[request->slice], in vd_set_access()
3988 MHIOCTKOWN, (intptr_t)NULL, (vd->open_flags | FKIOCTL), in vd_set_access()
4000 request->status = ldi_ioctl(vd->ldi_handle[request->slice], in vd_set_access()
4001 MHIOCQRESERVE, (intptr_t)NULL, (vd->open_flags | FKIOCTL), in vd_set_access()
4007 (void) vd_scsi_reset(vd); in vd_set_access()
4010 request->status = ldi_ioctl(vd->ldi_handle[request->slice], in vd_set_access()
4011 MHIOCQRESERVE, (intptr_t)NULL, (vd->open_flags | FKIOCTL), in vd_set_access()
4018 request->status = ldi_ioctl(vd->ldi_handle[request->slice], in vd_set_access()
4019 MHIOCQRESERVE, (intptr_t)NULL, (vd->open_flags | FKIOCTL), in vd_set_access()
4025 vd->ownership = B_TRUE; in vd_set_access()
4033 vd_reset_access(vd_t *vd) in vd_reset_access() argument
4037 if (vd->file || vd->volume || !vd->ownership) in vd_reset_access()
4041 status = ldi_ioctl(vd->ldi_handle[0], MHIOCRELEASE, (intptr_t)NULL, in vd_reset_access()
4042 (vd->open_flags | FKIOCTL), kcred, &rval); in vd_reset_access()
4049 vd->ownership = B_FALSE; in vd_reset_access()
4060 status = vd_scsi_reset(vd); in vd_reset_access()
4066 status = ldi_ioctl(vd->ldi_handle[0], MHIOCRELEASE, (intptr_t)NULL, in vd_reset_access()
4067 (vd->open_flags | FKIOCTL), kcred, &rval); in vd_reset_access()
4070 vd->ownership = B_FALSE; in vd_reset_access()
4087 ", rebooting the system", vd->device_path); in vd_reset_access()
4090 panic(VD_RESET_ACCESS_FAILURE_MSG, vd->device_path); in vd_reset_access()
4093 cmn_err(CE_WARN, VD_RESET_ACCESS_FAILURE_MSG, vd->device_path); in vd_reset_access()
4141 vd_t *vd = task->vd; in vd_do_process_task() local
4144 ASSERT(vd != NULL); in vd_do_process_task()
4164 if ((VD_OP_SUPPORTED(vd->operations, request->operation) == B_FALSE) || in vd_do_process_task()
4172 if (request->slice >= vd->nslices && in vd_do_process_task()
4173 ((vd->vdisk_type != VD_DISK_TYPE_DISK && vd_slice_single_slice) || in vd_do_process_task()
4176 request->slice, (vd->nslices - 1)); in vd_do_process_task()
4213 vd_t *vd = task->vd; in vd_process_task() local
4235 (void) ddi_taskq_dispatch(vd->completionq, vd_complete, in vd_process_task()
4240 if (!vd->reset_state && (vd->xfer_mode == VIO_DRING_MODE_V1_0)) { in vd_process_task()
4242 status = vd_mark_elem_done(vd, task->index, in vd_process_task()
4245 vd_mark_in_reset(vd); in vd_process_task()
4247 vd_need_reset(vd, B_TRUE); in vd_process_task()
4330 vd_process_ver_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) in vd_process_ver_msg() argument
4375 ASSERT(!(vd->initialized & VD_SID)); in vd_process_ver_msg()
4376 vd->sid = ver_msg->tag.vio_sid; in vd_process_ver_msg()
4377 vd->initialized |= VD_SID; in vd_process_ver_msg()
4384 vd->version.major = ver_msg->ver_major; in vd_process_ver_msg()
4385 vd->version.minor = ver_msg->ver_minor; in vd_process_ver_msg()
4393 vd_set_exported_operations(vd_t *vd) in vd_set_exported_operations() argument
4395 vd->operations = 0; /* clear field */ in vd_set_exported_operations()
4402 if (vio_ver_is_supported(vd->version, 1, 1)) { in vd_set_exported_operations()
4403 ASSERT(vd->open_flags & FREAD); in vd_set_exported_operations()
4404 vd->operations |= VD_OP_MASK_READ | (1 << VD_OP_GET_CAPACITY); in vd_set_exported_operations()
4406 if (vd->open_flags & FWRITE) in vd_set_exported_operations()
4407 vd->operations |= VD_OP_MASK_WRITE; in vd_set_exported_operations()
4409 if (vd->scsi) in vd_set_exported_operations()
4410 vd->operations |= VD_OP_MASK_SCSI; in vd_set_exported_operations()
4412 if (VD_DSKIMG(vd) && vd_dskimg_is_iso_image(vd)) { in vd_set_exported_operations()
4418 vd->operations &= ~VD_OP_MASK_WRITE; in vd_set_exported_operations()
4420 } else if (vio_ver_is_supported(vd->version, 1, 0)) { in vd_set_exported_operations()
4421 vd->operations = VD_OP_MASK_READ | VD_OP_MASK_WRITE; in vd_set_exported_operations()
4425 ASSERT(vd->operations != 0); in vd_set_exported_operations()
4429 vd_process_attr_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) in vd_process_attr_msg() argument
4465 if ((vd->initialized & VD_DISK_READY) == 0) { in vd_process_attr_msg()
4466 PR0("Retry setting up disk (%s)", vd->device_path); in vd_process_attr_msg()
4468 status = vd_setup_vd(vd); in vd_process_attr_msg()
4476 if (!vd_enabled(vd)) in vd_process_attr_msg()
4484 vd->initialized |= VD_DISK_READY; in vd_process_attr_msg()
4485 ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR); in vd_process_attr_msg()
4487 ((vd->vdisk_type == VD_DISK_TYPE_DISK) ? "disk" : "slice"), in vd_process_attr_msg()
4488 (vd->volume ? "yes" : "no"), in vd_process_attr_msg()
4489 (vd->file ? "yes" : "no"), in vd_process_attr_msg()
4490 vd->nslices); in vd_process_attr_msg()
4494 vd->xfer_mode = attr_msg->xfer_mode; in vd_process_attr_msg()
4496 if (vd->xfer_mode == VIO_DESC_MODE) { in vd_process_attr_msg()
4520 vd->max_msglen = MAX(vd->max_msglen, max_inband_msglen); in vd_process_attr_msg()
4526 vd->inband_task.vd = vd; in vd_process_attr_msg()
4527 vd->inband_task.msg = kmem_alloc(vd->max_msglen, KM_SLEEP); in vd_process_attr_msg()
4528 vd->inband_task.index = 0; in vd_process_attr_msg()
4529 vd->inband_task.type = VD_FINAL_RANGE_TASK; /* range == 1 */ in vd_process_attr_msg()
4533 attr_msg->vdisk_block_size = vd->vdisk_bsize; in vd_process_attr_msg()
4534 attr_msg->max_xfer_sz = vd->max_xfer_sz; in vd_process_attr_msg()
4536 attr_msg->vdisk_size = vd->vdisk_size; in vd_process_attr_msg()
4537 attr_msg->vdisk_type = (vd_slice_single_slice)? vd->vdisk_type : in vd_process_attr_msg()
4539 attr_msg->vdisk_media = vd->vdisk_media; in vd_process_attr_msg()
4542 vd_set_exported_operations(vd); in vd_process_attr_msg()
4543 attr_msg->operations = vd->operations; in vd_process_attr_msg()
4545 PR0("%s", VD_CLIENT(vd)); in vd_process_attr_msg()
4547 ASSERT(vd->dring_task == NULL); in vd_process_attr_msg()
4553 vd_process_dring_reg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) in vd_process_dring_reg_msg() argument
4584 if (vd->initialized & VD_DRING) { in vd_process_dring_reg_msg()
4618 status = ldc_mem_dring_map(vd->ldc_handle, reg_msg->cookie, in vd_process_dring_reg_msg()
4620 reg_msg->descriptor_size, mtype, &vd->dring_handle); in vd_process_dring_reg_msg()
4634 ldc_mem_dring_info(vd->dring_handle, &dring_minfo)) != 0) { in vd_process_dring_reg_msg()
4636 if ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0) in vd_process_dring_reg_msg()
4648 vd->initialized |= VD_DRING; in vd_process_dring_reg_msg()
4649 vd->dring_ident = 1; /* "There Can Be Only One" */ in vd_process_dring_reg_msg()
4650 vd->dring = dring_minfo.vaddr; in vd_process_dring_reg_msg()
4651 vd->descriptor_size = reg_msg->descriptor_size; in vd_process_dring_reg_msg()
4652 vd->dring_len = reg_msg->num_descriptors; in vd_process_dring_reg_msg()
4653 vd->dring_mtype = dring_minfo.mtype; in vd_process_dring_reg_msg()
4654 reg_msg->dring_ident = vd->dring_ident; in vd_process_dring_reg_msg()
4656 vd->descriptor_size, vd->dring_len); in vd_process_dring_reg_msg()
4662 vd->dring_task = in vd_process_dring_reg_msg()
4663 kmem_zalloc((sizeof (*vd->dring_task)) * vd->dring_len, KM_SLEEP); in vd_process_dring_reg_msg()
4664 for (int i = 0; i < vd->dring_len; i++) { in vd_process_dring_reg_msg()
4665 vd->dring_task[i].vd = vd; in vd_process_dring_reg_msg()
4666 vd->dring_task[i].index = i; in vd_process_dring_reg_msg()
4668 status = ldc_mem_alloc_handle(vd->ldc_handle, in vd_process_dring_reg_msg()
4669 &(vd->dring_task[i].mhdl)); in vd_process_dring_reg_msg()
4680 vd->dring_task[i].request = kmem_zalloc((vd->descriptor_size - in vd_process_dring_reg_msg()
4682 vd->dring_task[i].msg = kmem_alloc(vd->max_msglen, KM_SLEEP); in vd_process_dring_reg_msg()
4685 if (vd->file || vd->zvol) { in vd_process_dring_reg_msg()
4686 vd->write_queue = in vd_process_dring_reg_msg()
4687 kmem_zalloc(sizeof (buf_t *) * vd->dring_len, KM_SLEEP); in vd_process_dring_reg_msg()
4694 vd_process_dring_unreg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) in vd_process_dring_unreg_msg() argument
4713 if (unreg_msg->dring_ident != vd->dring_ident) { in vd_process_dring_unreg_msg()
4715 vd->dring_ident, unreg_msg->dring_ident); in vd_process_dring_unreg_msg()
4743 vd_check_seq_num(vd_t *vd, uint64_t seq_num) in vd_check_seq_num() argument
4745 if ((vd->initialized & VD_SEQ_NUM) && (seq_num != vd->seq_num + 1)) { in vd_check_seq_num()
4747 seq_num, (vd->seq_num + 1)); in vd_check_seq_num()
4749 vd_need_reset(vd, B_FALSE); in vd_check_seq_num()
4753 vd->seq_num = seq_num; in vd_check_seq_num()
4754 vd->initialized |= VD_SEQ_NUM; /* superfluous after first time... */ in vd_check_seq_num()
4775 vd_process_desc_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) in vd_process_desc_msg() argument
4801 if (vd_check_seq_num(vd, desc_msg->hdr.seq_num) != 0) in vd_process_desc_msg()
4813 ASSERT(vd->inband_task.msg != NULL); in vd_process_desc_msg()
4815 bcopy(msg, vd->inband_task.msg, msglen); in vd_process_desc_msg()
4816 vd->inband_task.msglen = msglen; in vd_process_desc_msg()
4822 desc_msg = (vd_dring_inband_msg_t *)vd->inband_task.msg; in vd_process_desc_msg()
4823 vd->inband_task.request = &desc_msg->payload; in vd_process_desc_msg()
4825 return (vd_process_task(&vd->inband_task)); in vd_process_desc_msg()
4829 vd_process_element(vd_t *vd, vd_task_type_t type, uint32_t idx, in vd_process_element() argument
4838 if ((status = VIO_DRING_ACQUIRE(&otd, vd->dring_mtype, in vd_process_element()
4839 vd->dring_handle, idx, idx)) != 0) { in vd_process_element()
4845 bcopy(&elem->payload, vd->dring_task[idx].request, in vd_process_element()
4846 (vd->descriptor_size - sizeof (vio_dring_entry_hdr_t))); in vd_process_element()
4851 if ((status = VIO_DRING_RELEASE(vd->dring_mtype, in vd_process_element()
4852 vd->dring_handle, idx, idx)) != 0) { in vd_process_element()
4862 vd->dring_task[idx].type = type; in vd_process_element()
4865 bcopy(msg, vd->dring_task[idx].msg, msglen); in vd_process_element()
4867 vd->dring_task[idx].msglen = msglen; in vd_process_element()
4868 return (vd_process_task(&vd->dring_task[idx])); in vd_process_element()
4872 vd_process_element_range(vd_t *vd, int start, int end, in vd_process_element_range() argument
4893 nelem = ((end < start) ? end + vd->dring_len : end) - start + 1; in vd_process_element_range()
4894 for (i = start, n = nelem; n > 0; i = (i + 1) % vd->dring_len, n--) { in vd_process_element_range()
4897 status = vd_process_element(vd, type, i, msg, msglen); in vd_process_element_range()
4913 if (vd->ioq != NULL) in vd_process_element_range()
4914 ddi_taskq_wait(vd->ioq); in vd_process_element_range()
4915 ddi_taskq_wait(vd->completionq); in vd_process_element_range()
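
The descriptor count at line 4893 allows the range [start, end] to wrap past the end of the ring. A quick stand-alone check of that expression:

#include <stdio.h>

/* Number of descriptors in the inclusive range [start, end] of a ring of
 * dring_len entries, as computed at line 4893. */
static int
range_nelem(int dring_len, int start, int end)
{
	return (((end < start) ? end + dring_len : end) - start + 1);
}

int
main(void)
{
	printf("%d\n", range_nelem(8, 2, 5));	/* 4: elements 2,3,4,5 */
	printf("%d\n", range_nelem(8, 6, 1));	/* 4: elements 6,7,0,1 */
	return (0);
}
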
4922 vd_process_dring_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) in vd_process_dring_msg() argument
4941 if (vd_check_seq_num(vd, dring_msg->seq_num) != 0) in vd_process_dring_msg()
4944 if (dring_msg->dring_ident != vd->dring_ident) { in vd_process_dring_msg()
4946 vd->dring_ident, dring_msg->dring_ident); in vd_process_dring_msg()
4950 if (dring_msg->start_idx >= vd->dring_len) { in vd_process_dring_msg()
4952 dring_msg->start_idx, vd->dring_len); in vd_process_dring_msg()
4957 (dring_msg->end_idx >= vd->dring_len)) { in vd_process_dring_msg()
4959 dring_msg->end_idx, vd->dring_len); in vd_process_dring_msg()
4966 return (vd_process_element_range(vd, dring_msg->start_idx, in vd_process_dring_msg()
5000 vd_do_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) in vd_do_process_msg() argument
5015 if ((msg->tag.vio_sid != vd->sid) && (vd->initialized & VD_SID)) { in vd_do_process_msg()
5016 PR0("Expected SID %u, received %u", vd->sid, in vd_do_process_msg()
5021 PR1("\tWhile in state %d (%s)", vd->state, vd_decode_state(vd->state)); in vd_do_process_msg()
5026 switch (vd->state) { in vd_do_process_msg()
5028 if ((status = vd_process_ver_msg(vd, msg, msglen)) != 0) in vd_do_process_msg()
5032 vd->state = VD_STATE_VER; in vd_do_process_msg()
5036 if ((status = vd_process_attr_msg(vd, msg, msglen)) != 0) in vd_do_process_msg()
5040 vd->state = VD_STATE_ATTR; in vd_do_process_msg()
5044 switch (vd->xfer_mode) { in vd_do_process_msg()
5050 vd->state = VD_STATE_DATA; in vd_do_process_msg()
5055 vd_process_dring_reg_msg(vd, msg, msglen)) != 0) in vd_do_process_msg()
5059 vd->state = VD_STATE_DRING; in vd_do_process_msg()
5071 vd->state = VD_STATE_DATA; in vd_do_process_msg()
5085 vd_process_dring_reg_msg(vd, msg, msglen)) != ENOMSG) in vd_do_process_msg()
5094 status = vd_process_dring_unreg_msg(vd, msg, msglen); in vd_do_process_msg()
5098 switch (vd->xfer_mode) { in vd_do_process_msg()
5100 return (vd_process_desc_msg(vd, msg, msglen)); in vd_do_process_msg()
5107 if ((status = vd_process_dring_msg(vd, msg, in vd_do_process_msg()
5117 status = vd_process_dring_unreg_msg(vd, msg, msglen); in vd_do_process_msg()
5134 vd_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) in vd_process_msg() argument
5148 vd_need_reset(vd, B_TRUE); in vd_process_msg()
5155 switch (status = vd_do_process_msg(vd, msg, msglen)) { in vd_process_msg()
5181 PR1("\tResulting in state %d (%s)", vd->state, in vd_process_msg()
5182 vd_decode_state(vd->state)); in vd_process_msg()
5185 task.vd = vd; in vd_process_msg()
5195 (void) ddi_taskq_dispatch(vd->completionq, vd_serial_notify, in vd_process_msg()
5203 ddi_taskq_wait(vd->completionq); in vd_process_msg()
5209 vd_need_reset(vd, reset_ldc); in vd_process_msg()
5216 vd_enabled(vd_t *vd) in vd_enabled() argument
5220 mutex_enter(&vd->lock); in vd_enabled()
5221 enabled = vd->enabled; in vd_enabled()
5222 mutex_exit(&vd->lock); in vd_enabled()
5229 vd_t *vd = (vd_t *)arg; in vd_recv_msg() local
5232 ASSERT(vd != NULL); in vd_recv_msg()
5237 while (vd_enabled(vd) && status == 0) { in vd_recv_msg()
5244 vd_reset_if_needed(vd); /* can change vd->max_msglen */ in vd_recv_msg()
5249 status = ldc_status(vd->ldc_handle, &lstatus); in vd_recv_msg()
5256 ASSERT(vd->max_msglen != 0); in vd_recv_msg()
5258 msgsize = vd->max_msglen; /* stable copy for alloc/free */ in vd_recv_msg()
5261 status = recv_msg(vd->ldc_handle, vd->vio_msgp, &msglen); in vd_recv_msg()
5264 rv = vd_process_msg(vd, (void *)vd->vio_msgp, msglen); in vd_recv_msg()
5266 if (msgsize != vd->max_msglen) { in vd_recv_msg()
5268 msgsize, vd->max_msglen); in vd_recv_msg()
5269 kmem_free(vd->vio_msgp, msgsize); in vd_recv_msg()
5270 vd->vio_msgp = in vd_recv_msg()
5271 kmem_alloc(vd->max_msglen, KM_SLEEP); in vd_recv_msg()
5282 vd_need_reset(vd, B_FALSE); in vd_recv_msg()
5289 vd_need_reset(vd, B_TRUE); in vd_recv_msg()
5300 vd_t *vd = (vd_t *)(void *)arg; in vd_handle_ldc_events() local
5303 ASSERT(vd != NULL); in vd_handle_ldc_events()
5305 if (!vd_enabled(vd)) in vd_handle_ldc_events()
5311 vd_need_reset(vd, B_TRUE); in vd_handle_ldc_events()
5312 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, in vd_handle_ldc_events()
5316 vd_need_reset(vd, B_TRUE); in vd_handle_ldc_events()
5323 if (vd->state != VD_STATE_INIT) { in vd_handle_ldc_events()
5325 vd_need_reset(vd, B_FALSE); in vd_handle_ldc_events()
5326 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, in vd_handle_ldc_events()
5327 vd, DDI_SLEEP); in vd_handle_ldc_events()
5330 vd_need_reset(vd, B_TRUE); in vd_handle_ldc_events()
5336 (void) ldc_up(vd->ldc_handle); in vd_handle_ldc_events()
5345 vd_need_reset(vd, B_FALSE); in vd_handle_ldc_events()
5346 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, in vd_handle_ldc_events()
5347 vd, DDI_SLEEP); in vd_handle_ldc_events()
5350 vd_need_reset(vd, B_TRUE); in vd_handle_ldc_events()
5360 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, in vd_handle_ldc_events()
5365 vd_need_reset(vd, B_TRUE); in vd_handle_ldc_events()
5448 vd_dskimg_is_iso_image(vd_t *vd) in vd_dskimg_is_iso_image() argument
5454 ASSERT(VD_DSKIMG(vd)); in vd_dskimg_is_iso_image()
5460 if (vd->vdisk_media == VD_MEDIA_DVD || vd->vdisk_media == VD_MEDIA_CD) in vd_dskimg_is_iso_image()
5469 sec = (ISO_VOLDESC_SEC * ISO_SECTOR_SIZE) / vd->vdisk_bsize; in vd_dskimg_is_iso_image()
5470 rv = vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)iso_buf, in vd_dskimg_is_iso_image()
5498 vd_is_atapi_device(vd_t *vd) in vd_is_atapi_device() argument
5504 ASSERT(vd->ldi_handle[0] != NULL); in vd_is_atapi_device()
5505 ASSERT(!vd->file); in vd_is_atapi_device()
5507 rv = ldi_prop_lookup_string(vd->ldi_handle[0], in vd_is_atapi_device()
5510 PR0("'variant' property exists for %s", vd->device_path); in vd_is_atapi_device()
5516 rv = ldi_prop_exists(vd->ldi_handle[0], LDI_DEV_T_ANY, "atapi"); in vd_is_atapi_device()
5518 PR0("'atapi' property exists for %s", vd->device_path); in vd_is_atapi_device()
5526 vd_setup_full_disk(vd_t *vd) in vd_setup_full_disk() argument
5529 major_t major = getmajor(vd->dev[0]); in vd_setup_full_disk()
5530 minor_t minor = getminor(vd->dev[0]) - VD_ENTIRE_DISK_SLICE; in vd_setup_full_disk()
5532 ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK); in vd_setup_full_disk()
5535 status = vd_backend_check_size(vd); in vd_setup_full_disk()
5538 if (!vd->scsi) { in vd_setup_full_disk()
5541 vd->device_path, status); in vd_setup_full_disk()
5550 vd->vdisk_size = VD_SIZE_UNKNOWN; in vd_setup_full_disk()
5551 vd->vdisk_bsize = 0; in vd_setup_full_disk()
5552 vd->backend_bsize = 0; in vd_setup_full_disk()
5553 vd->vdisk_media = VD_MEDIA_FIXED; in vd_setup_full_disk()
5557 vd->dev[VD_ENTIRE_DISK_SLICE] = vd->dev[0]; in vd_setup_full_disk()
5558 vd->dev[0] = 0; in vd_setup_full_disk()
5559 vd->ldi_handle[VD_ENTIRE_DISK_SLICE] = vd->ldi_handle[0]; in vd_setup_full_disk()
5560 vd->ldi_handle[0] = NULL; in vd_setup_full_disk()
5563 for (int slice = 0; slice < vd->nslices; slice++) { in vd_setup_full_disk()
5570 ASSERT(vd->dev[slice] == 0); in vd_setup_full_disk()
5571 ASSERT(vd->ldi_handle[slice] == NULL); in vd_setup_full_disk()
5576 vd->dev[slice] = makedevice(major, (minor + slice)); in vd_setup_full_disk()
5600 status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK, in vd_setup_full_disk()
5601 vd->open_flags, kcred, &vd->ldi_handle[slice], in vd_setup_full_disk()
5602 vd->vds->ldi_ident); in vd_setup_full_disk()
5605 status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK, in vd_setup_full_disk()
5606 vd->open_flags | FNDELAY, kcred, in vd_setup_full_disk()
5607 &vd->ldi_handle[slice], vd->vds->ldi_ident); in vd_setup_full_disk()
5614 vd->ldi_handle[slice] = NULL; in vd_setup_full_disk()
5663 vd_setup_partition_vtoc(vd_t *vd) in vd_setup_partition_vtoc() argument
5665 char *device_path = vd->device_path; in vd_setup_partition_vtoc()
5670 if (vd->dk_geom.dkg_nsect == 0) { in vd_setup_partition_vtoc()
5674 if (vd->dk_geom.dkg_nhead == 0) { in vd_setup_partition_vtoc()
5680 csize = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect; in vd_setup_partition_vtoc()
5686 vd->dk_geom.dkg_ncyl = vd->vdisk_size / csize + 1; in vd_setup_partition_vtoc()
5689 vd->dk_geom.dkg_acyl = 2; in vd_setup_partition_vtoc()
5690 vd->dk_geom.dkg_pcyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl; in vd_setup_partition_vtoc()
5694 bzero(vd->vtoc.v_part, sizeof (vd->vtoc.v_part)); in vd_setup_partition_vtoc()
5695 vd->vtoc.v_part[0].p_tag = V_UNASSIGNED; in vd_setup_partition_vtoc()
5696 vd->vtoc.v_part[0].p_flag = 0; in vd_setup_partition_vtoc()
5701 vd->vtoc.v_part[0].p_start = csize; /* start on cylinder 1 */ in vd_setup_partition_vtoc()
5702 vd->vtoc.v_part[0].p_size = (vd->vdisk_size / csize) * csize; in vd_setup_partition_vtoc()
5705 vd->vtoc.v_nparts = 1; in vd_setup_partition_vtoc()
5706 bcopy(VD_ASCIILABEL, vd->vtoc.v_asciilabel, in vd_setup_partition_vtoc()
5708 sizeof (vd->vtoc.v_asciilabel))); in vd_setup_partition_vtoc()
5709 bcopy(VD_VOLUME_NAME, vd->vtoc.v_volume, in vd_setup_partition_vtoc()
5710 MIN(sizeof (VD_VOLUME_NAME), sizeof (vd->vtoc.v_volume))); in vd_setup_partition_vtoc()
5713 vd->nslices = V_NUMPAR; in vd_setup_partition_vtoc()
5714 vd->vtoc.v_nparts = V_NUMPAR; in vd_setup_partition_vtoc()
5717 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_tag = V_BACKUP; in vd_setup_partition_vtoc()
5718 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_flag = 0; in vd_setup_partition_vtoc()
5719 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_start = 0; in vd_setup_partition_vtoc()
5720 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_size = in vd_setup_partition_vtoc()
5721 vd->dk_geom.dkg_ncyl * csize; in vd_setup_partition_vtoc()
5723 vd_get_readable_size(vd->vdisk_size * vd->vdisk_bsize, in vd_setup_partition_vtoc()
5731 vd->dk_geom.dkg_bcyl = 0; in vd_setup_partition_vtoc()
5732 vd->dk_geom.dkg_intrlv = 1; in vd_setup_partition_vtoc()
5733 vd->dk_geom.dkg_write_reinstruct = 0; in vd_setup_partition_vtoc()
5734 vd->dk_geom.dkg_read_reinstruct = 0; in vd_setup_partition_vtoc()
5740 (void) snprintf(vd->vtoc.v_asciilabel, LEN_DKL_ASCII, in vd_setup_partition_vtoc()
5743 vd->dk_geom.dkg_ncyl, vd->dk_geom.dkg_acyl, in vd_setup_partition_vtoc()
5744 vd->dk_geom.dkg_nhead, vd->dk_geom.dkg_nsect); in vd_setup_partition_vtoc()
5745 bzero(vd->vtoc.v_volume, sizeof (vd->vtoc.v_volume)); in vd_setup_partition_vtoc()
5748 vd->flabel_limit = (uint_t)csize; in vd_setup_partition_vtoc()
5749 vd->flabel_size = VD_LABEL_VTOC_SIZE(vd->vdisk_bsize); in vd_setup_partition_vtoc()
5750 vd->flabel = kmem_zalloc(vd->flabel_size, KM_SLEEP); in vd_setup_partition_vtoc()
5751 vd_vtocgeom_to_label(&vd->vtoc, &vd->dk_geom, in vd_setup_partition_vtoc()
5752 VD_LABEL_VTOC(vd)); in vd_setup_partition_vtoc()
5756 vd->vdisk_size += csize * 3; in vd_setup_partition_vtoc()
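
vd_setup_partition_vtoc (lines 5663-5756) fabricates a VTOC geometry for a single-slice export: cylinders of dkg_nhead * dkg_nsect blocks, slice 0 starting on cylinder 1 and rounded down to whole cylinders, and three extra cylinders added to the exported size at line 5756, matching cylinder 0 plus the two alternate cylinders set up above. A worked example with illustrative numbers:

#include <stdio.h>

int
main(void)
{
	/* Illustrative backend size and heads/sectors, not real values. */
	size_t vdisk_size = 2097152;		/* backend blocks (1 GiB @ 512) */
	size_t nhead = 96, nsect = 768;

	size_t csize = nhead * nsect;		/* blocks per cylinder */
	size_t ncyl  = vdisk_size / csize + 1;	/* line 5686 */
	size_t acyl  = 2;			/* line 5689 */
	size_t pcyl  = ncyl + acyl;		/* line 5690 */

	size_t s0_start = csize;		/* slice 0 starts on cyl 1 */
	size_t s0_size  = (vdisk_size / csize) * csize;	/* whole cylinders */
	size_t exported = vdisk_size + csize * 3;	/* line 5756 */

	printf("csize=%zu ncyl=%zu pcyl=%zu\n", csize, ncyl, pcyl);
	printf("slice0 start=%zu size=%zu exported size=%zu\n",
	    s0_start, s0_size, exported);
	return (0);
}
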
5806 vd_setup_partition_efi(vd_t *vd) in vd_setup_partition_efi() argument
5816 ASSERT(vd->vdisk_bsize > 0); in vd_setup_partition_efi()
5818 bsize = vd->vdisk_bsize; in vd_setup_partition_efi()
5824 vd->flabel_limit = (uint_t)first_u_lba; in vd_setup_partition_efi()
5825 vd->flabel_size = VD_LABEL_EFI_SIZE(bsize); in vd_setup_partition_efi()
5826 vd->flabel = kmem_zalloc(vd->flabel_size, KM_SLEEP); in vd_setup_partition_efi()
5827 gpt = VD_LABEL_EFI_GPT(vd, bsize); in vd_setup_partition_efi()
5828 gpe = VD_LABEL_EFI_GPE(vd, bsize); in vd_setup_partition_efi()
5834 vd->vdisk_size += first_u_lba; in vd_setup_partition_efi()
5836 s0_end = vd->vdisk_size - 1; in vd_setup_partition_efi()
5854 vd->nslices = V_NUMPAR; in vd_setup_partition_efi()
5865 vd->vdisk_size += EFI_MIN_RESV_SIZE; in vd_setup_partition_efi()
5868 gpt->efi_gpt_LastUsableLBA = LE_64(vd->vdisk_size - 1); in vd_setup_partition_efi()
5871 vd->vdisk_size += (EFI_MIN_ARRAY_SIZE / bsize) + 1; in vd_setup_partition_efi()
5872 gpt->efi_gpt_AlternateLBA = LE_64(vd->vdisk_size - 1); in vd_setup_partition_efi()
5889 vd_setup_backend_vnode(vd_t *vd) in vd_setup_backend_vnode() argument
5893 char *file_path = vd->device_path; in vd_setup_backend_vnode()
5897 ASSERT(!vd->volume); in vd_setup_backend_vnode()
5899 if ((status = vn_open(file_path, UIO_SYSSPACE, vd->open_flags | FOFFMAX, in vd_setup_backend_vnode()
5900 0, &vd->file_vnode, 0, 0)) != 0) { in vd_setup_backend_vnode()
5902 status == EROFS) && (!(vd->initialized & VD_SETUP_ERROR) && in vd_setup_backend_vnode()
5903 !(DEVI_IS_ATTACHING(vd->vds->dip)))) { in vd_setup_backend_vnode()
5913 vd->file = B_TRUE; in vd_setup_backend_vnode()
5915 vd->max_xfer_sz = maxphys / DEV_BSIZE; /* default transfer size */ in vd_setup_backend_vnode()
5920 dev = vd->file_vnode->v_vfsp->vfs_dev; in vd_setup_backend_vnode()
5925 vd->vds->ldi_ident); in vd_setup_backend_vnode()
5932 (intptr_t)&dk_cinfo, (vd->open_flags | FKIOCTL), kcred, in vd_setup_backend_vnode()
5941 vd->max_xfer_sz = dk_cinfo.dki_maxtransfer; in vd_setup_backend_vnode()
5949 file_path, getmajor(dev), getminor(dev), vd->max_xfer_sz); in vd_setup_backend_vnode()
5951 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) in vd_setup_backend_vnode()
5952 status = vd_setup_slice_image(vd); in vd_setup_backend_vnode()
5954 status = vd_setup_disk_image(vd); in vd_setup_backend_vnode()
5960 vd_setup_slice_image(vd_t *vd) in vd_setup_slice_image() argument
5965 if ((status = vd_backend_check_size(vd)) != 0) { in vd_setup_slice_image()
5967 vd->device_path, status); in vd_setup_slice_image()
5971 vd->vdisk_media = VD_MEDIA_FIXED; in vd_setup_slice_image()
5972 vd->vdisk_label = (vd_slice_label == VD_DISK_LABEL_UNK)? in vd_setup_slice_image()
5975 if (vd->vdisk_label == VD_DISK_LABEL_EFI || in vd_setup_slice_image()
5976 vd->dskimg_size >= 2 * ONE_TERABYTE) { in vd_setup_slice_image()
5977 status = vd_setup_partition_efi(vd); in vd_setup_slice_image()
5985 vd_build_default_label(vd->dskimg_size, vd->vdisk_bsize, in vd_setup_slice_image()
5987 vd_label_to_vtocgeom(&label, &vd->vtoc, &vd->dk_geom); in vd_setup_slice_image()
5988 status = vd_setup_partition_vtoc(vd); in vd_setup_slice_image()
5995 vd_setup_disk_image(vd_t *vd) in vd_setup_disk_image() argument
5998 char *backend_path = vd->device_path; in vd_setup_disk_image()
6000 if ((status = vd_backend_check_size(vd)) != 0) { in vd_setup_disk_image()
6007 if (vd->dskimg_size < sizeof (struct dk_label)) { in vd_setup_disk_image()
6016 status = vd_dskimg_validate_geometry(vd); in vd_setup_disk_image()
6022 if (vd_dskimg_is_iso_image(vd)) { in vd_setup_disk_image()
6028 if ((vd->vdisk_size * vd->vdisk_bsize) > ONE_GIGABYTE) in vd_setup_disk_image()
6029 vd->vdisk_media = VD_MEDIA_DVD; in vd_setup_disk_image()
6031 vd->vdisk_media = VD_MEDIA_CD; in vd_setup_disk_image()
6033 vd->vdisk_media = VD_MEDIA_FIXED; in vd_setup_disk_image()
6038 if (vd->vdisk_label != VD_DISK_LABEL_UNK) { in vd_setup_disk_image()
6040 status = vd_dskimg_read_devid(vd, &vd->dskimg_devid); in vd_setup_disk_image()
6054 vd->dskimg_devid = NULL; in vd_setup_disk_image()
6066 if (ddi_devid_init(vd->vds->dip, DEVID_FAB, 0, 0, in vd_setup_disk_image()
6067 &vd->dskimg_devid) != DDI_SUCCESS) { in vd_setup_disk_image()
6069 vd->dskimg_devid = NULL; in vd_setup_disk_image()
6078 if (vd->vdisk_label != VD_DISK_LABEL_UNK) { in vd_setup_disk_image()
6079 if (vd_dskimg_write_devid(vd, vd->dskimg_devid) != 0) { in vd_setup_disk_image()
6081 ddi_devid_free(vd->dskimg_devid); in vd_setup_disk_image()
6082 vd->dskimg_devid = NULL; in vd_setup_disk_image()
6103 vd_open_using_ldi_by_name(vd_t *vd, int flags) in vd_open_using_ldi_by_name() argument
6106 char *device_path = vd->device_path; in vd_open_using_ldi_by_name()
6110 &vd->ldi_handle[0], vd->vds->ldi_ident); in vd_open_using_ldi_by_name()
6119 kcred, &vd->ldi_handle[0], vd->vds->ldi_ident); in vd_open_using_ldi_by_name()
6123 vd->ldi_handle[0] = NULL; in vd_open_using_ldi_by_name()
6136 vd_setup_backend_ldi(vd_t *vd) in vd_setup_backend_ldi() argument
6140 char *device_path = vd->device_path; in vd_setup_backend_ldi()
6143 ASSERT(vd->ldi_handle[0] != NULL); in vd_setup_backend_ldi()
6144 ASSERT(vd->dev[0] != NULL); in vd_setup_backend_ldi()
6146 vd->file = B_FALSE; in vd_setup_backend_ldi()
6149 if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCINFO, in vd_setup_backend_ldi()
6150 (intptr_t)&dk_cinfo, (vd->open_flags | FKIOCTL), kcred, in vd_setup_backend_ldi()
6174 vd->open_flags &= ~FWRITE; in vd_setup_backend_ldi()
6176 } else if (vd->open_flags & FWRITE) { in vd_setup_backend_ldi()
6178 (void) ldi_close(vd->ldi_handle[0], vd->open_flags & ~FWRITE, in vd_setup_backend_ldi()
6180 status = vd_open_using_ldi_by_name(vd, vd->open_flags); in vd_setup_backend_ldi()
6189 vd->max_xfer_sz = dk_cinfo.dki_maxtransfer; in vd_setup_backend_ldi()
6195 vd->is_atapi_dev = vd_is_atapi_device(vd); in vd_setup_backend_ldi()
6207 if (vd->vdisk_type == VD_DISK_TYPE_DISK) { in vd_setup_backend_ldi()
6209 if (vd->volume) { in vd_setup_backend_ldi()
6211 return (vd_setup_disk_image(vd)); in vd_setup_backend_ldi()
6216 ASSERT(!vd->volume); in vd_setup_backend_ldi()
6218 vd->scsi = B_TRUE; in vd_setup_backend_ldi()
6219 return (vd_setup_full_disk(vd)); in vd_setup_backend_ldi()
6232 return (vd_setup_single_slice_disk(vd)); in vd_setup_backend_ldi()
6236 vd_setup_single_slice_disk(vd_t *vd) in vd_setup_single_slice_disk() argument
6240 char *device_path = vd->device_path; in vd_setup_single_slice_disk()
6243 vd->vdisk_media = VD_MEDIA_FIXED; in vd_setup_single_slice_disk()
6245 if (vd->volume) { in vd_setup_single_slice_disk()
6246 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); in vd_setup_single_slice_disk()
6253 vd->vdisk_type = VD_DISK_TYPE_SLICE; in vd_setup_single_slice_disk()
6254 vd->nslices = 1; in vd_setup_single_slice_disk()
6257 if ((status = vd_backend_check_size(vd)) != 0) { in vd_setup_single_slice_disk()
6272 vd->vdisk_size >= ONE_TERABYTE / vd->vdisk_bsize) { in vd_setup_single_slice_disk()
6273 vd->vdisk_label = VD_DISK_LABEL_EFI; in vd_setup_single_slice_disk()
6275 status = ldi_ioctl(vd->ldi_handle[0], DKIOCGEXTVTOC, in vd_setup_single_slice_disk()
6276 (intptr_t)&vd->vtoc, (vd->open_flags | FKIOCTL), in vd_setup_single_slice_disk()
6281 status = ldi_ioctl(vd->ldi_handle[0], DKIOCGVTOC, in vd_setup_single_slice_disk()
6282 (intptr_t)&vtoc, (vd->open_flags | FKIOCTL), in vd_setup_single_slice_disk()
6284 vtoctoextvtoc(vtoc, vd->vtoc); in vd_setup_single_slice_disk()
6288 status = ldi_ioctl(vd->ldi_handle[0], DKIOCGGEOM, in vd_setup_single_slice_disk()
6289 (intptr_t)&vd->dk_geom, (vd->open_flags | FKIOCTL), in vd_setup_single_slice_disk()
6297 vd->vdisk_label = VD_DISK_LABEL_VTOC; in vd_setup_single_slice_disk()
6301 vd->vdisk_label = VD_DISK_LABEL_VTOC; in vd_setup_single_slice_disk()
6302 vd_build_default_label(vd->vdisk_size * vd->vdisk_bsize, in vd_setup_single_slice_disk()
6303 vd->vdisk_bsize, &label); in vd_setup_single_slice_disk()
6304 vd_label_to_vtocgeom(&label, &vd->vtoc, &vd->dk_geom); in vd_setup_single_slice_disk()
6307 vd->vdisk_label = VD_DISK_LABEL_EFI; in vd_setup_single_slice_disk()
6311 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) { in vd_setup_single_slice_disk()
6313 status = vd_setup_partition_vtoc(vd); in vd_setup_single_slice_disk()
6317 status = vd_setup_partition_efi(vd); in vd_setup_single_slice_disk()
6359 vd_backend_check_size(vd_t *vd) in vd_backend_check_size() argument
6368 if (vd->file) { in vd_backend_check_size()
6372 rv = VOP_GETATTR(vd->file_vnode, &vattr, 0, kcred, NULL); in vd_backend_check_size()
6374 PR0("VOP_GETATTR(%s) = errno %d", vd->device_path, rv); in vd_backend_check_size()
6381 } else if (vd->volume) { in vd_backend_check_size()
6384 rv = ldi_get_size(vd->ldi_handle[0], &backend_size); in vd_backend_check_size()
6386 PR0("ldi_get_size() failed for %s", vd->device_path); in vd_backend_check_size()
6395 rv = ldi_ioctl(vd->ldi_handle[0], DKIOCGMEDIAINFO, in vd_backend_check_size()
6396 (intptr_t)&minfo, (vd->open_flags | FKIOCTL), in vd_backend_check_size()
6400 vd->device_path, rv); in vd_backend_check_size()
6404 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) { in vd_backend_check_size()
6405 rv = ldi_get_size(vd->ldi_handle[0], &backend_size); in vd_backend_check_size()
6408 vd->device_path); in vd_backend_check_size()
6412 ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK); in vd_backend_check_size()
6434 old_size = vd->vdisk_size; in vd_backend_check_size()
6439 vd->vdisk_bsize == vdisk_bsize) in vd_backend_check_size()
6451 vd->vio_bshift = nshift; in vd_backend_check_size()
6452 vd->vdisk_size = new_size; in vd_backend_check_size()
6453 vd->vdisk_bsize = vdisk_bsize; in vd_backend_check_size()
6454 vd->backend_bsize = backend_bsize; in vd_backend_check_size()
6456 if (vd->file || vd->volume) in vd_backend_check_size()
6457 vd->dskimg_size = backend_size; in vd_backend_check_size()
6465 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) { in vd_backend_check_size()
6467 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) { in vd_backend_check_size()
6468 rv = vd_setup_partition_vtoc(vd); in vd_backend_check_size()
6471 "(err = %d)", vd->device_path, rv); in vd_backend_check_size()
6475 rv = vd_setup_partition_efi(vd); in vd_backend_check_size()
6478 "(err = %d)", vd->device_path, rv); in vd_backend_check_size()
6483 } else if (!vd->file && !vd->volume) { in vd_backend_check_size()
6485 ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK); in vd_backend_check_size()
6486 vd->vdisk_media = media; in vd_backend_check_size()
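
vd_backend_check_size() is matched heavily above. The sketch below condenses its flow: probe the backend size in a backend-specific way, then update the exported geometry only if something changed. Declarations, the block-size/shift derivation, and the exact "nothing changed" test are elided by the listing; they are summarized in comments or marked as assumptions.

    /*
     * Sketch: vd_backend_check_size(), condensed from the matched lines.
     */
    old_size = vd->vdisk_size;

    if (vd->file) {
        /* file-backed image: the file size is the backend size */
        vattr.va_mask = AT_SIZE;
        if ((rv = VOP_GETATTR(vd->file_vnode, &vattr, 0, kcred, NULL)) != 0) {
            PR0("VOP_GETATTR(%s) = errno %d", vd->device_path, rv);
            return (rv);
        }
        backend_size = vattr.va_size;
    } else if (vd->volume) {
        /* volume backend (e.g. a zvol): ask LDI for its size */
        if ((rv = ldi_get_size(vd->ldi_handle[0], &backend_size)) != 0) {
            PR0("ldi_get_size() failed for %s", vd->device_path);
            return (EIO);
        }
    } else {
        /* physical device: query the media information */
        rv = ldi_ioctl(vd->ldi_handle[0], DKIOCGMEDIAINFO,
            (intptr_t)&minfo, (vd->open_flags | FKIOCTL), kcred, &rval);
        if (rv != 0) {
            PR0("DKIOCGMEDIAINFO failed for %s (err=%d)",
                vd->device_path, rv);
            return (rv);
        }

        if (vd->vdisk_type == VD_DISK_TYPE_SLICE) {
            /* exported slice: its size comes from LDI, not the media */
            if ((rv = ldi_get_size(vd->ldi_handle[0], &backend_size)) != 0) {
                PR0("ldi_get_size() failed for %s", vd->device_path);
                return (EIO);
            }
        } else {
            /* whole disk: assumed size computation from the media info */
            ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK);
            backend_size = minfo.dki_capacity * minfo.dki_lbsize;
        }
    }

    /* ... derive backend_bsize, vdisk_bsize, new_size, nshift, media ... */

    if (new_size == old_size && vd->vdisk_bsize == vdisk_bsize)
        return (0);     /* assumed test: nothing changed */

    vd->vio_bshift = nshift;
    vd->vdisk_size = new_size;
    vd->vdisk_bsize = vdisk_bsize;
    vd->backend_bsize = backend_bsize;

    if (vd->file || vd->volume)
        vd->dskimg_size = backend_size;

    if (vd->vdisk_type == VD_DISK_TYPE_SLICE) {
        /* the fake single-slice label must be rebuilt for the new size */
        rv = (vd->vdisk_label == VD_DISK_LABEL_VTOC) ?
            vd_setup_partition_vtoc(vd) : vd_setup_partition_efi(vd);
    } else if (!vd->file && !vd->volume) {
        /* whole physical disk: remember the media type from the probe */
        ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK);
        vd->vdisk_media = media;
    }
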
6506 vd_identify_dev(vd_t *vd, int *dtype) in vd_identify_dev() argument
6509 char *device_path = vd->device_path; in vd_identify_dev()
6512 vds_t *vds = vd->vds; in vd_identify_dev()
6514 status = vd_open_using_ldi_by_name(vd, vd->open_flags & ~FWRITE); in vd_identify_dev()
6521 if ((status = ldi_get_dev(vd->ldi_handle[0], &vd->dev[0])) != 0) { in vd_identify_dev()
6531 drv_name = ddi_major_to_name(getmajor(vd->dev[0])); in vd_identify_dev()
6560 vd->zvol = B_TRUE; in vd_identify_dev()
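
The vd_identify_dev() matches show the backend being opened read-only so the serving driver can be identified from the device major number. The sketch below fills in the flow; only the zvol flag assignment appears in the listing, so the comparison against "zfs" (and the implied driver-name check) is an assumption.

    /*
     * Sketch: vd_identify_dev(), condensed from the matched lines.
     * The driver-name comparison is assumed; *dtype is also reported
     * back to the caller (mapping elided here).
     */
    status = vd_open_using_ldi_by_name(vd, vd->open_flags & ~FWRITE);
    if (status != 0)
        return (status);

    if ((status = ldi_get_dev(vd->ldi_handle[0], &vd->dev[0])) != 0)
        return (status);

    drv_name = ddi_major_to_name(getmajor(vd->dev[0]));

    if (strcmp(drv_name, "zfs") == 0)
        vd->zvol = B_TRUE;      /* backend is served by the zfs driver */
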
6568 vd_setup_vd(vd_t *vd) in vd_setup_vd() argument
6573 char *path = vd->device_path; in vd_setup_vd()
6590 vd->volume = B_FALSE; in vd_setup_vd()
6591 status = vd_setup_backend_vnode(vd); in vd_setup_vd()
6630 if ((status = vd_identify_dev(vd, &drv_type)) != 0) { in vd_setup_vd()
6646 vd->volume = B_TRUE; in vd_setup_vd()
6658 if (vd->volume && vd_volume_force_slice) { in vd_setup_vd()
6659 vd->vdisk_type = VD_DISK_TYPE_SLICE; in vd_setup_vd()
6660 vd->nslices = 1; in vd_setup_vd()
6663 status = vd_setup_backend_ldi(vd); in vd_setup_vd()
6680 if (!(vd->initialized & VD_SETUP_ERROR) && in vd_setup_vd()
6681 !(DEVI_IS_ATTACHING(vd->vds->dip))) { in vd_setup_vd()
6690 vd->initialized |= VD_SETUP_ERROR; in vd_setup_vd()
6692 } else if (vd->initialized & VD_SETUP_ERROR) { in vd_setup_vd()
6695 vd->initialized &= ~VD_SETUP_ERROR; in vd_setup_vd()
6706 if ((vd->file || vd->zvol) && vd->ioq == NULL) { in vd_setup_vd()
6707 (void) snprintf(tq_name, sizeof (tq_name), "vd_ioq%lu", vd->id); in vd_setup_vd()
6709 if ((vd->ioq = ddi_taskq_create(vd->vds->dip, tq_name, in vd_setup_vd()
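
vd_setup_vd() chooses between the vnode and LDI backends. The sketch below condenses the matched lines; the classification of the path via lookupname()/v_type, the VD_DRIVER_VOLUME constant, and the vd_ioq_nthreads tunable are assumptions, since only the setup calls, the volume handling, and the I/O queue creation appear in the listing. The matched lines 6680-6695 additionally show that a failure outside of attach is remembered in VD_SETUP_ERROR so it is reported only once, and cleared again on a later successful setup.

    /*
     * Sketch: backend selection in vd_setup_vd(), condensed from the
     * matched lines.  The lookupname()/v_type classification is assumed.
     */
    if ((status = lookupname(path, UIO_SYSSPACE, FOLLOW, NULLVPP, &vnp)) != 0)
        return (status);

    switch (vnp->v_type) {
    case VREG:
        /* plain file: serve it through the vnode backend */
        vd->volume = B_FALSE;
        status = vd_setup_backend_vnode(vd);
        break;
    case VBLK:
    case VCHR:
        /* device or volume: identify the serving driver first */
        if ((status = vd_identify_dev(vd, &drv_type)) != 0)
            break;
        if (drv_type == VD_DRIVER_VOLUME)       /* constant name assumed */
            vd->volume = B_TRUE;

        /* volumes can optionally be exported as single-slice disks */
        if (vd->volume && vd_volume_force_slice) {
            vd->vdisk_type = VD_DISK_TYPE_SLICE;
            vd->nslices = 1;
        }
        status = vd_setup_backend_ldi(vd);
        break;
    default:
        status = EBADF;         /* assumed error for unsupported types */
    }
    VN_RELE(vnp);

    /* file and zvol backends get a dedicated I/O task queue */
    if ((vd->file || vd->zvol) && vd->ioq == NULL) {
        (void) snprintf(tq_name, sizeof (tq_name), "vd_ioq%lu", vd->id);
        if ((vd->ioq = ddi_taskq_create(vd->vds->dip, tq_name,
            vd_ioq_nthreads, TASKQ_DEFAULTPRI, 0)) == NULL)
            return (EIO);
    }
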
6727 vd_t *vd; in vds_do_init_vd() local
6735 if ((vd = kmem_zalloc(sizeof (*vd), KM_NOSLEEP)) == NULL) { in vds_do_init_vd()
6739 *vdp = vd; /* assign here so vds_destroy_vd() can cleanup later */ in vds_do_init_vd()
6740 vd->id = id; in vds_do_init_vd()
6741 vd->vds = vds; in vds_do_init_vd()
6742 (void) strncpy(vd->device_path, device_path, MAXPATHLEN); in vds_do_init_vd()
6745 vd->open_flags = FREAD; in vds_do_init_vd()
6748 vd->open_flags |= FWRITE; in vds_do_init_vd()
6751 vd->open_flags |= FEXCL; in vds_do_init_vd()
6755 vd->vdisk_type = VD_DISK_TYPE_SLICE; in vds_do_init_vd()
6756 vd->nslices = 1; in vds_do_init_vd()
6758 vd->vdisk_type = VD_DISK_TYPE_DISK; in vds_do_init_vd()
6759 vd->nslices = V_NUMPAR; in vds_do_init_vd()
6763 vd->vdisk_label = VD_DISK_LABEL_UNK; in vds_do_init_vd()
6766 if ((status = vd_setup_vd(vd)) == 0) { in vds_do_init_vd()
6767 vd->initialized |= VD_DISK_READY; in vds_do_init_vd()
6769 ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR); in vds_do_init_vd()
6771 ((vd->vdisk_type == VD_DISK_TYPE_DISK) ? "disk" : "slice"), in vds_do_init_vd()
6772 (vd->volume ? "yes" : "no"), (vd->file ? "yes" : "no"), in vds_do_init_vd()
6773 vd->nslices); in vds_do_init_vd()
6786 mutex_init(&vd->lock, NULL, MUTEX_DRIVER, iblock); in vds_do_init_vd()
6787 vd->initialized |= VD_LOCKING; in vds_do_init_vd()
6793 if ((vd->startq = ddi_taskq_create(vds->dip, tq_name, 1, in vds_do_init_vd()
6800 if ((vd->completionq = ddi_taskq_create(vds->dip, tq_name, 1, in vds_do_init_vd()
6807 vd->max_msglen = sizeof (vio_msg_t); /* baseline vio message size */ in vds_do_init_vd()
6808 vd->vio_msgp = kmem_alloc(vd->max_msglen, KM_SLEEP); in vds_do_init_vd()
6810 vd->enabled = 1; /* before callback can dispatch to startq */ in vds_do_init_vd()
6818 if ((status = ldc_init(ldc_id, &ldc_attr, &vd->ldc_handle)) != 0) { in vds_do_init_vd()
6823 vd->initialized |= VD_LDC; in vds_do_init_vd()
6825 if ((status = ldc_reg_callback(vd->ldc_handle, vd_handle_ldc_events, in vds_do_init_vd()
6826 (caddr_t)vd)) != 0) { in vds_do_init_vd()
6832 if ((status = ldc_open(vd->ldc_handle)) != 0) { in vds_do_init_vd()
6838 if ((status = ldc_up(vd->ldc_handle)) != 0) { in vds_do_init_vd()
6843 status = ldc_mem_alloc_handle(vd->ldc_handle, &(vd->inband_task.mhdl)); in vds_do_init_vd()
6851 if (mod_hash_insert(vds->vd_table, (mod_hash_key_t)id, vd) != 0) { in vds_do_init_vd()
6857 vd->state = VD_STATE_INIT; in vds_do_init_vd()
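
The vds_do_init_vd() matches outline the full per-vd initialization sequence. The sketch below condenses that order; task queue names, LDC attributes, the iblock cookie, the option parsing that sets FWRITE/FEXCL and the slice/disk choice, and most error returns are elided by the listing and only summarized.

    /*
     * Sketch: initialization order in vds_do_init_vd(), condensed from
     * the matched lines.
     */
    if ((vd = kmem_zalloc(sizeof (*vd), KM_NOSLEEP)) == NULL)
        return (EAGAIN);
    *vdp = vd;              /* assigned early so vds_destroy_vd() can clean up */
    vd->id = id;
    vd->vds = vds;
    (void) strncpy(vd->device_path, device_path, MAXPATHLEN);

    vd->open_flags = FREAD;         /* FWRITE/FEXCL added per export options */
    if (single_slice_export) {      /* stand-in for the elided option check */
        vd->vdisk_type = VD_DISK_TYPE_SLICE;
        vd->nslices = 1;
    } else {
        vd->vdisk_type = VD_DISK_TYPE_DISK;
        vd->nslices = V_NUMPAR;
    }
    vd->vdisk_label = VD_DISK_LABEL_UNK;

    /* open and validate the backend */
    if ((status = vd_setup_vd(vd)) == 0) {
        vd->initialized |= VD_DISK_READY;
        ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR);
    }

    /* per-vd lock and task queues */
    mutex_init(&vd->lock, NULL, MUTEX_DRIVER, iblock);
    vd->initialized |= VD_LOCKING;
    vd->startq = ddi_taskq_create(vds->dip, tq_name, 1, TASKQ_DEFAULTPRI, 0);
    vd->completionq = ddi_taskq_create(vds->dip, tq_name, 1, TASKQ_DEFAULTPRI, 0);

    /* baseline VIO message buffer; grown later during the handshake */
    vd->max_msglen = sizeof (vio_msg_t);
    vd->vio_msgp = kmem_alloc(vd->max_msglen, KM_SLEEP);
    vd->enabled = 1;        /* set before the LDC callback can dispatch */

    /* bring up the LDC channel to the client domain */
    if ((status = ldc_init(ldc_id, &ldc_attr, &vd->ldc_handle)) != 0)
        return (status);
    vd->initialized |= VD_LDC;
    if ((status = ldc_reg_callback(vd->ldc_handle, vd_handle_ldc_events,
        (caddr_t)vd)) != 0)
        return (status);
    if ((status = ldc_open(vd->ldc_handle)) != 0)
        return (status);
    if (ldc_up(vd->ldc_handle) != 0) {
        /* the client end may not be up yet; handling elided by the listing */
    }
    status = ldc_mem_alloc_handle(vd->ldc_handle, &(vd->inband_task.mhdl));

    /* publish the vd so the LDC callback and ioctls can find it by id */
    if (mod_hash_insert(vds->vd_table, (mod_hash_key_t)id, vd) != 0)
        return (EINVAL);

    vd->state = VD_STATE_INIT;
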
6894 vd_t *vd = (vd_t *)arg; in vds_destroy_vd() local
6897 if (vd == NULL) in vds_destroy_vd()
6903 if (vd->initialized & VD_LOCKING) { in vds_destroy_vd()
6904 mutex_enter(&vd->lock); in vds_destroy_vd()
6905 vd->enabled = 0; in vds_destroy_vd()
6906 mutex_exit(&vd->lock); in vds_destroy_vd()
6910 if (vd->startq != NULL) in vds_destroy_vd()
6911 ddi_taskq_destroy(vd->startq); /* waits for queued tasks */ in vds_destroy_vd()
6914 if (vd->ioq != NULL) in vds_destroy_vd()
6915 ddi_taskq_destroy(vd->ioq); in vds_destroy_vd()
6918 if (vd->completionq != NULL) in vds_destroy_vd()
6919 ddi_taskq_destroy(vd->completionq); /* waits for tasks */ in vds_destroy_vd()
6921 vd_free_dring_task(vd); in vds_destroy_vd()
6924 (void) ldc_mem_free_handle(vd->inband_task.mhdl); in vds_destroy_vd()
6927 if (vd->initialized & VD_LDC) { in vds_destroy_vd()
6929 if (vd->initialized & VD_DRING) in vds_destroy_vd()
6930 (void) ldc_mem_dring_unmap(vd->dring_handle); in vds_destroy_vd()
6933 while ((rv = ldc_close(vd->ldc_handle)) == EAGAIN) { in vds_destroy_vd()
6941 (void) ldc_unreg_callback(vd->ldc_handle); in vds_destroy_vd()
6942 (void) ldc_fini(vd->ldc_handle); in vds_destroy_vd()
6951 (void) ldc_set_cb_mode(vd->ldc_handle, LDC_CB_DISABLE); in vds_destroy_vd()
6952 while (ldc_unreg_callback(vd->ldc_handle) == EAGAIN) in vds_destroy_vd()
6958 if (vd->vio_msgp != NULL) { in vds_destroy_vd()
6959 kmem_free(vd->vio_msgp, vd->max_msglen); in vds_destroy_vd()
6960 vd->vio_msgp = NULL; in vds_destroy_vd()
6964 if (vd->inband_task.msg != NULL) { in vds_destroy_vd()
6965 kmem_free(vd->inband_task.msg, vd->max_msglen); in vds_destroy_vd()
6966 vd->inband_task.msg = NULL; in vds_destroy_vd()
6969 if (vd->file) { in vds_destroy_vd()
6971 (void) VOP_CLOSE(vd->file_vnode, vd->open_flags, 1, in vds_destroy_vd()
6973 VN_RELE(vd->file_vnode); in vds_destroy_vd()
6977 if (vd->ldi_handle[slice] != NULL) { in vds_destroy_vd()
6979 (void) ldi_close(vd->ldi_handle[slice], in vds_destroy_vd()
6980 vd->open_flags, kcred); in vds_destroy_vd()
6986 if (vd->dskimg_devid != NULL) in vds_destroy_vd()
6987 ddi_devid_free(vd->dskimg_devid); in vds_destroy_vd()
6990 if (vd->flabel) { in vds_destroy_vd()
6991 kmem_free(vd->flabel, vd->flabel_size); in vds_destroy_vd()
6992 vd->flabel = NULL; in vds_destroy_vd()
6993 vd->flabel_size = 0; in vds_destroy_vd()
6997 if (vd->initialized & VD_LOCKING) in vds_destroy_vd()
6998 mutex_destroy(&vd->lock); in vds_destroy_vd()
7001 kmem_free(vd, sizeof (*vd)); in vds_destroy_vd()
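
The vds_destroy_vd() matches spell out the teardown order, which mirrors the initialization above and is guarded by the vd->initialized flags. The sketch below condenses it; the retry delay, the slice-loop bound, and the trailing VOP_CLOSE() arguments are not shown in the listing and are filled in as assumptions.

    /*
     * Sketch: teardown order in vds_destroy_vd(), condensed from the
     * matched lines.  Each step runs only if the matching setup step
     * completed.
     */
    if (vd == NULL)
        return;

    /* 1. disable the vd so no new tasks are dispatched */
    if (vd->initialized & VD_LOCKING) {
        mutex_enter(&vd->lock);
        vd->enabled = 0;
        mutex_exit(&vd->lock);
    }

    /* 2. drain and destroy the task queues (waits for queued tasks) */
    if (vd->startq != NULL)
        ddi_taskq_destroy(vd->startq);
    if (vd->ioq != NULL)
        ddi_taskq_destroy(vd->ioq);
    if (vd->completionq != NULL)
        ddi_taskq_destroy(vd->completionq);

    vd_free_dring_task(vd);
    (void) ldc_mem_free_handle(vd->inband_task.mhdl);

    /* 3. tear down the LDC channel */
    if (vd->initialized & VD_LDC) {
        if (vd->initialized & VD_DRING)
            (void) ldc_mem_dring_unmap(vd->dring_handle);
        while ((rv = ldc_close(vd->ldc_handle)) == EAGAIN)
            drv_usecwait(vds_ldc_delay);        /* delay value assumed */
        if (rv == 0) {
            (void) ldc_unreg_callback(vd->ldc_handle);
            (void) ldc_fini(vd->ldc_handle);
        } else {
            /* channel will not close: disable callbacks and keep retrying */
            (void) ldc_set_cb_mode(vd->ldc_handle, LDC_CB_DISABLE);
            while (ldc_unreg_callback(vd->ldc_handle) == EAGAIN)
                drv_usecwait(vds_ldc_delay);
        }
    }

    /* 4. free message buffers and close the backend */
    if (vd->vio_msgp != NULL) {
        kmem_free(vd->vio_msgp, vd->max_msglen);
        vd->vio_msgp = NULL;
    }
    if (vd->inband_task.msg != NULL) {
        kmem_free(vd->inband_task.msg, vd->max_msglen);
        vd->inband_task.msg = NULL;
    }

    if (vd->file) {
        /* file backend: close and release the vnode */
        (void) VOP_CLOSE(vd->file_vnode, vd->open_flags, 1,
            (offset_t)0, kcred, NULL);          /* trailing args assumed */
        VN_RELE(vd->file_vnode);
    } else {
        /* LDI backend: close every slice that was opened */
        for (slice = 0; slice < V_NUMPAR; slice++) {
            if (vd->ldi_handle[slice] != NULL)
                (void) ldi_close(vd->ldi_handle[slice],
                    vd->open_flags, kcred);
        }
    }

    if (vd->dskimg_devid != NULL)
        ddi_devid_free(vd->dskimg_devid);

    if (vd->flabel) {
        kmem_free(vd->flabel, vd->flabel_size);
        vd->flabel = NULL;
        vd->flabel_size = 0;
    }

    if (vd->initialized & VD_LOCKING)
        mutex_destroy(&vd->lock);

    kmem_free(vd, sizeof (*vd));
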
7009 vd_t *vd = NULL; in vds_init_vd() local
7013 ldc_id, &vd)) != 0) in vds_init_vd()
7014 vds_destroy_vd(vd); in vds_init_vd()
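
Finally, the vds_init_vd() matches show it to be a thin wrapper around vds_do_init_vd(). A minimal sketch follows; the leading arguments of the call are not visible in the listing and are assumed, only the trailing ldc_id and &vd appear above.

    /*
     * Sketch: vds_init_vd() delegates to vds_do_init_vd() and, on
     * failure, destroys the partially initialized vd (which is why
     * vds_do_init_vd() assigns *vdp before doing any real work).
     */
    vd_t *vd = NULL;
    int status;

    if ((status = vds_do_init_vd(vds, id, device_path, options,
        ldc_id, &vd)) != 0)                     /* argument list assumed */
        vds_destroy_vd(vd);
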