/* $NetBSD: udf_readwrite.c,v 1.11 2011/06/12 03:35:55 rmind Exp $ */

/*
 * Copyright (c) 2007, 2008 Reinoud Zandijk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#ifndef lint
__KERNEL_RCSID(0, "$NetBSD: udf_readwrite.c,v 1.11 2011/06/12 03:35:55 rmind Exp $");
#endif /* not lint */


#if defined(_KERNEL_OPT)
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <miscfs/genfs/genfs_node.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kthread.h>
#include <dev/clock_subr.h>

#include <fs/udf/ecma167-udf.h>
#include <fs/udf/udf_mount.h>

#include "udf.h"
#include "udf_subr.h"
#include "udf_bswap.h"


#define VTOI(vnode) ((struct udf_node *) vnode->v_data)

/* --------------------------------------------------------------------- */

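/*
 * Note: `blob' points at an in-memory block image containing (part of) a
 * FID stream, `rfix_pos' and `max_rfix_pos' delimit the byte range within
 * it that may hold FIDs, and `lb_num' is the logical block number the data
 * is about to be recorded at. Every valid FID tag found in the range is
 * rewritten to point at that block.
 */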
void
udf_fixup_fid_block(uint8_t *blob, int lb_size,
	int rfix_pos, int max_rfix_pos, uint32_t lb_num)
{
	struct fileid_desc *fid;
	uint8_t *fid_pos;
	int fid_len, found;

	/* needs to be 4-byte aligned */
	KASSERT(rfix_pos % 4 == 0);

	/* first resync with the FID stream !!! */
	found = 0;
	while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
		fid_pos = blob + rfix_pos;
		fid = (struct fileid_desc *) fid_pos;
		if (udf_rw16(fid->tag.id) == TAGID_FID) {
			if (udf_check_tag((union dscrptr *) fid) == 0)
				found = 1;
		}
		if (found)
			break;
		/* try next location; FIDs can only start 4-byte aligned */
		rfix_pos += 4;
	}

	/* walk over the fids */
	fid_pos = blob + rfix_pos;
	while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
		fid = (struct fileid_desc *) fid_pos;
		if (udf_rw16(fid->tag.id) != TAGID_FID) {
			/* end of FID stream; end of directory or corrupted */
			break;
		}

		/* update sector number and recalculate checksum */
		fid->tag.tag_loc = udf_rw32(lb_num);
		udf_validate_tag_sum((union dscrptr *) fid);

		/* if the FID would cross the end of the blob, we're done */
		if (rfix_pos + UDF_FID_SIZE >= max_rfix_pos)
			break;

		fid_len = udf_fidsize(fid);
		fid_pos  += fid_len;
		rfix_pos += fid_len;
	}
}


void
udf_fixup_internal_extattr(uint8_t *blob, uint32_t lb_num)
{
	struct desc_tag        *tag;
	struct file_entry      *fe;
	struct extfile_entry   *efe;
	struct extattrhdr_desc *eahdr;
	int l_ea;

	/* get information from fe/efe */
	tag = (struct desc_tag *) blob;
	switch (udf_rw16(tag->id)) {
	case TAGID_FENTRY :
		fe = (struct file_entry *) blob;
		l_ea  = udf_rw32(fe->l_ea);
		eahdr = (struct extattrhdr_desc *) fe->data;
		break;
	case TAGID_EXTFENTRY :
		efe = (struct extfile_entry *) blob;
		l_ea  = udf_rw32(efe->l_ea);
		eahdr = (struct extattrhdr_desc *) efe->data;
		break;
	case TAGID_INDIRECTENTRY :
	case TAGID_ALLOCEXTENT :
	case TAGID_EXTATTR_HDR :
		return;
	default:
		panic("%s: passed bad tag\n", __func__);
	}

	/* something recorded here? (why am I called?) */
	if (l_ea == 0)
		return;

#if 0
	/* check extended attribute tag */
	/* TODO XXX what to do when we encounter an error here? */
	error = udf_check_tag(eahdr);
	if (error)
		return;	/* for now */
	if (udf_rw16(eahdr->tag.id) != TAGID_EXTATTR_HDR)
		return;	/* for now */
	error = udf_check_tag_payload(eahdr, sizeof(struct extattrhdr_desc));
	if (error)
		return; /* for now */
#endif

	DPRINTF(EXTATTR, ("node fixup: found %d bytes of extended attributes\n",
		l_ea));

	/* fixup eahdr tag */
	eahdr->tag.tag_loc = udf_rw32(lb_num);
	udf_validate_tag_and_crc_sums((union dscrptr *) eahdr);
}


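/*
 * Note: run on node (UDF_C_NODE) blocks before they are written out. For
 * (extended) file entries with internally recorded data, the embedded
 * extended attributes, the FIDs of embedded directories and the metadata
 * bitmap descriptor carry descriptor tags of their own; those tags have to
 * agree with the block the node is recorded in, so they are rewritten here
 * before the node descriptor itself is revalidated.
 */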
void
udf_fixup_node_internals(struct udf_mount *ump, uint8_t *blob, int udf_c_type)
{
	struct desc_tag *tag, *sbm_tag;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	uint32_t lb_size, lb_num;
	uint32_t intern_pos, max_intern_pos;
	int icbflags, addr_type, file_type, intern, has_fids, has_sbm, l_ea;

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* if it's not a node we're done */
	if (udf_c_type != UDF_C_NODE)
		return;

	/* NOTE this could also be done in write_internal */
	/* start of a descriptor */
	l_ea      = 0;
	has_fids  = 0;
	has_sbm   = 0;
	intern    = 0;
	file_type = 0;
	max_intern_pos = intern_pos = lb_num = 0;	/* shut up gcc! */

	tag = (struct desc_tag *) blob;
	switch (udf_rw16(tag->id)) {
	case TAGID_FENTRY :
		fe = (struct file_entry *) tag;
		l_ea = udf_rw32(fe->l_ea);
		icbflags  = udf_rw16(fe->icbtag.flags);
		addr_type = (icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK);
		file_type = fe->icbtag.file_type;
		intern = (addr_type == UDF_ICB_INTERN_ALLOC);
		intern_pos  = UDF_FENTRY_SIZE + l_ea;
		max_intern_pos = intern_pos + udf_rw64(fe->inf_len);
		lb_num = udf_rw32(fe->tag.tag_loc);
		break;
	case TAGID_EXTFENTRY :
		efe = (struct extfile_entry *) tag;
		l_ea = udf_rw32(efe->l_ea);
		icbflags  = udf_rw16(efe->icbtag.flags);
		addr_type = (icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK);
		file_type = efe->icbtag.file_type;
		intern = (addr_type == UDF_ICB_INTERN_ALLOC);
		intern_pos  = UDF_EXTFENTRY_SIZE + l_ea;
		max_intern_pos = intern_pos + udf_rw64(efe->inf_len);
		lb_num = udf_rw32(efe->tag.tag_loc);
		break;
	case TAGID_INDIRECTENTRY :
	case TAGID_EXTATTR_HDR :
		break;
	case TAGID_ALLOCEXTENT :
		/* force crclen to 8 for UDF version < 2.01 */
		ext = (struct alloc_ext_entry *) tag;
		if (udf_rw16(ump->logvol_info->min_udf_readver) <= 0x200)
			ext->tag.desc_crc_len = udf_rw16(8);
		break;
	default:
		panic("%s: passed bad tag\n", __func__);
		break;
	}

	/* determine what to fix if it's internally recorded */
	if (intern) {
		has_fids = (file_type == UDF_ICB_FILETYPE_DIRECTORY) ||
			   (file_type == UDF_ICB_FILETYPE_STREAMDIR);
		has_sbm  = (file_type == UDF_ICB_FILETYPE_META_BITMAP);
	}

	/* fixup internal extended attributes if present */
	if (l_ea)
		udf_fixup_internal_extattr(blob, lb_num);

	/* fixup fids lb numbers */
	if (has_fids)
		udf_fixup_fid_block(blob, lb_size, intern_pos,
			max_intern_pos, lb_num);

	/* fixup space bitmap descriptor */
	if (has_sbm) {
		sbm_tag = (struct desc_tag *) (blob + intern_pos);
		sbm_tag->tag_loc = tag->tag_loc;
		udf_validate_tag_and_crc_sums((uint8_t *) sbm_tag);
	}

	udf_validate_tag_and_crc_sums(blob);
}

/* --------------------------------------------------------------------- */

/*
 * Set of generic descriptor readers and writers and their helper functions.
 * Descriptors inside `logical space' i.e. inside logically mapped partitions
 * can never be longer than one logical sector.
 *
 * NOTE that these functions *can* be used by the scheduler backends to read
 * node descriptors too.
 *
 * For reading, the size of the allocated piece is a multiple of the sector
 * size due to udf_calc_udf_malloc_size().
 */
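
/*
 * Illustrative usage sketch (the M_UDFTEMP malloc type is an assumption,
 * not taken from this file): a caller typically reads one descriptor from
 * a known physical sector and frees it afterwards:
 *
 *	union dscrptr *dscr;
 *	int error;
 *
 *	error = udf_read_phys_dscr(ump, sector, M_UDFTEMP, &dscr);
 *	if (error == 0 && dscr == NULL) {
 *		(all-zero sector: nothing recorded there, not an error)
 *	}
 *	...
 *	if (dscr)
 *		free(dscr, M_UDFTEMP);
 */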


/* SYNC reading of n blocks from specified sector */
int
udf_read_phys_sectors(struct udf_mount *ump, int what, void *blob,
	uint32_t start, uint32_t sectors)
{
	struct buf *buf, *nestbuf;
	uint32_t buf_offset;
	off_t lblkno, rblkno;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;
	int piece;
	int error;

	DPRINTF(READ, ("udf_intbreadn() : sectors = %d, sector_size = %d\n",
		sectors, sector_size));
	buf = getiobuf(ump->devvp, true);
	buf->b_flags    = B_READ;
	buf->b_cflags   = BC_BUSY;	/* needed? */
	buf->b_iodone   = NULL;
	buf->b_data     = blob;
	buf->b_bcount   = sectors * sector_size;
	buf->b_resid    = buf->b_bcount;
	buf->b_bufsize  = buf->b_bcount;
	buf->b_private  = NULL;	/* not needed yet */
	BIO_SETPRIO(buf, BPRIO_DEFAULT);
	buf->b_lblkno   = buf->b_blkno = buf->b_rawblkno = start * blks;
	buf->b_proc     = NULL;

	error = 0;
	buf_offset = 0;
	rblkno = start;
	lblkno = 0;
	while ((sectors > 0) && (error == 0)) {
		piece = MIN(MAXPHYS/sector_size, sectors);
		DPRINTF(READ, ("read in %d + %d\n", (uint32_t) rblkno, piece));

		nestbuf = getiobuf(NULL, true);
		nestiobuf_setup(buf, nestbuf, buf_offset, piece * sector_size);
		/* nestbuf is B_ASYNC */

		/* identify this nestbuf */
		nestbuf->b_lblkno   = lblkno;

		/* CD schedules on raw blkno */
		nestbuf->b_blkno      = rblkno * blks;
		nestbuf->b_proc       = NULL;
		nestbuf->b_rawblkno   = rblkno * blks;
		nestbuf->b_udf_c_type = what;

		udf_discstrat_queuebuf(ump, nestbuf);

		lblkno     += piece;
		rblkno     += piece;
		buf_offset += piece * sector_size;
		sectors    -= piece;
	}
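	/*
	 * Each nested buffer queued above reports its completion back to the
	 * master buffer through the nestiobuf machinery, so a single
	 * biowait() on the master buffer waits for the whole, possibly
	 * split, transfer.
	 */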
	error = biowait(buf);
	putiobuf(buf);

	return error;
}


/* synchronous generic descriptor read */
int
udf_read_phys_dscr(struct udf_mount *ump, uint32_t sector,
		    struct malloc_type *mtype, union dscrptr **dstp)
{
	union dscrptr *dst, *new_dst;
	uint8_t *pos;
	int sectors, dscrlen;
	int i, error, sector_size;

	sector_size = ump->discinfo.sector_size;

	*dstp = dst = NULL;
	dscrlen = sector_size;

	/* read initial piece */
	dst = malloc(sector_size, mtype, M_WAITOK);
	error = udf_read_phys_sectors(ump, UDF_C_DSCR, dst, sector, 1);
	DPRINTFIF(DESCRIPTOR, error, ("read error (%d)\n", error));

	if (!error) {
		/* check if it's a valid tag */
		error = udf_check_tag(dst);
		if (error) {
			/* check if it's an empty block */
			pos = (uint8_t *) dst;
			for (i = 0; i < sector_size; i++, pos++) {
				if (*pos) break;
			}
			if (i == sector_size) {
				/* return no error but with no dscrptr */
				/* dispose first block */
				free(dst, mtype);
				return 0;
			}
		}
		/* calculate descriptor size */
		dscrlen = udf_tagsize(dst, sector_size);
	}
	DPRINTFIF(DESCRIPTOR, error, ("bad tag checksum\n"));

	if (!error && (dscrlen > sector_size)) {
		DPRINTF(DESCRIPTOR, ("multi block descriptor read\n"));
		/*
		 * Read the rest of the descriptor. Since it is only used at
		 * mount time, it is overkill to define and use a specific
		 * udf_intbreadn for this alone.
		 */

		new_dst = realloc(dst, dscrlen, mtype, M_WAITOK);
		if (new_dst == NULL) {
			free(dst, mtype);
			return ENOMEM;
		}
		dst = new_dst;

		sectors = (dscrlen + sector_size - 1) / sector_size;
		DPRINTF(DESCRIPTOR, ("dscrlen = %d (%d blk)\n", dscrlen, sectors));

		pos = (uint8_t *) dst + sector_size;
		error = udf_read_phys_sectors(ump, UDF_C_DSCR, pos,
				sector + 1, sectors-1);

		DPRINTFIF(DESCRIPTOR, error, ("read error on multi (%d)\n",
		    error));
	}
	if (!error) {
		error = udf_check_tag_payload(dst, dscrlen);
		DPRINTFIF(DESCRIPTOR, error, ("bad payload checksum\n"));
	}
	if (error && dst) {
		free(dst, mtype);
		dst = NULL;
	}
	*dstp = dst;

	return error;
}


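/*
 * Queue a write of the whole buffer in MAXPHYS-sized nested buffers through
 * the disc strategy. The synchronous callers below biowait() on the master
 * buffer themselves; the asynchronous descriptor write instead relies on
 * the buffer's b_iodone callback for completion.
 */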
static void
udf_write_phys_buf(struct udf_mount *ump, int what, struct buf *buf)
{
	struct buf *nestbuf;
	uint32_t buf_offset;
	off_t lblkno, rblkno;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;
	uint32_t sectors;
	int piece;
	int error;

	sectors = buf->b_bcount / sector_size;
	DPRINTF(WRITE, ("udf_intbwriten() : sectors = %d, sector_size = %d\n",
		sectors, sector_size));

	/* don't forget to increase pending count for the bwrite itself */
/* panic("NO WRITING\n"); */
	if (buf->b_vp) {
		mutex_enter(buf->b_vp->v_interlock);
		buf->b_vp->v_numoutput++;
		mutex_exit(buf->b_vp->v_interlock);
	}

	error = 0;
	buf_offset = 0;
	rblkno = buf->b_blkno / blks;
	lblkno = 0;
	while ((sectors > 0) && (error == 0)) {
		piece = MIN(MAXPHYS/sector_size, sectors);
		DPRINTF(WRITE, ("write out %d + %d\n",
		    (uint32_t) rblkno, piece));

		nestbuf = getiobuf(NULL, true);
		nestiobuf_setup(buf, nestbuf, buf_offset, piece * sector_size);
		/* nestbuf is B_ASYNC */

		/* identify this nestbuf */
		nestbuf->b_lblkno   = lblkno;

		/* CD schedules on raw blkno */
		nestbuf->b_blkno      = rblkno * blks;
		nestbuf->b_proc       = NULL;
		nestbuf->b_rawblkno   = rblkno * blks;
		nestbuf->b_udf_c_type = what;

		udf_discstrat_queuebuf(ump, nestbuf);

		lblkno     += piece;
		rblkno     += piece;
		buf_offset += piece * sector_size;
		sectors    -= piece;
	}
}


/* SYNC writing of n blocks from specified sector */
int
udf_write_phys_sectors(struct udf_mount *ump, int what, void *blob,
	uint32_t start, uint32_t sectors)
{
	struct vnode *vp;
	struct buf *buf;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;
	int error;

	/* get transfer buffer */
	vp = ump->devvp;
	buf = getiobuf(vp, true);
	buf->b_flags    = B_WRITE;
	buf->b_cflags   = BC_BUSY;	/* needed? */
	buf->b_iodone   = NULL;
	buf->b_data     = blob;
	buf->b_bcount   = sectors * sector_size;
	buf->b_resid    = buf->b_bcount;
	buf->b_bufsize  = buf->b_bcount;
	buf->b_private  = NULL;	/* not needed yet */
	BIO_SETPRIO(buf, BPRIO_DEFAULT);
	buf->b_lblkno   = buf->b_blkno = buf->b_rawblkno = start * blks;
	buf->b_proc     = NULL;

	/* do the write, wait and return error */
	udf_write_phys_buf(ump, what, buf);
	error = biowait(buf);
	putiobuf(buf);

	return error;
}


/* synchronous generic descriptor write */
int
udf_write_phys_dscr_sync(struct udf_mount *ump, struct udf_node *udf_node, int what,
		     union dscrptr *dscr, uint32_t sector, uint32_t logsector)
{
	struct vnode *vp;
	struct buf *buf;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;
	int dscrlen;
	int error;

	/* set sector number in the descriptor and validate */
	dscr->tag.tag_loc = udf_rw32(logsector);
	udf_validate_tag_and_crc_sums(dscr);

	/* calculate descriptor size */
	dscrlen = udf_tagsize(dscr, sector_size);

	/* get transfer buffer */
	vp = udf_node ? udf_node->vnode : ump->devvp;
	buf = getiobuf(vp, true);
	buf->b_flags    = B_WRITE;
	buf->b_cflags   = BC_BUSY;	/* needed? */
	buf->b_iodone   = NULL;
	buf->b_data     = (void *) dscr;
	buf->b_bcount   = dscrlen;
	buf->b_resid    = buf->b_bcount;
	buf->b_bufsize  = buf->b_bcount;
	buf->b_private  = NULL;	/* not needed yet */
	BIO_SETPRIO(buf, BPRIO_DEFAULT);
	buf->b_lblkno   = buf->b_blkno = buf->b_rawblkno = sector * blks;
	buf->b_proc     = NULL;

	/* do the write, wait and return error */
	udf_write_phys_buf(ump, what, buf);
	error = biowait(buf);
	putiobuf(buf);

	return error;
}


/* asynchronous generic descriptor write */
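/*
 * Unlike the synchronous variant this routine does not biowait(); the
 * caller-supplied dscrwr_callback is installed as b_iodone, and completion
 * (including any I/O error recorded in the buffer) is reported through it.
 */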
int
udf_write_phys_dscr_async(struct udf_mount *ump, struct udf_node *udf_node,
		      int what, union dscrptr *dscr,
		      uint32_t sector, uint32_t logsector,
		      void (*dscrwr_callback)(struct buf *))
{
	struct vnode *vp;
	struct buf *buf;
	int dscrlen;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;

	KASSERT(dscrwr_callback);
	DPRINTF(NODE, ("udf_write_phys_dscr_async() called\n"));

	/* set sector number in the descriptor and validate */
	dscr->tag.tag_loc = udf_rw32(logsector);
	udf_validate_tag_and_crc_sums(dscr);

	/* calculate descriptor size */
	dscrlen = udf_tagsize(dscr, sector_size);

	/* get transfer buffer */
	vp = udf_node ? udf_node->vnode : ump->devvp;
	buf = getiobuf(vp, true);
	buf->b_flags    = B_WRITE; // | B_ASYNC;
	buf->b_cflags   = BC_BUSY;
	buf->b_iodone	= dscrwr_callback;
	buf->b_data     = dscr;
	buf->b_bcount   = dscrlen;
	buf->b_resid    = buf->b_bcount;
	buf->b_bufsize  = buf->b_bcount;
	buf->b_private  = NULL;	/* not needed yet */
	BIO_SETPRIO(buf, BPRIO_DEFAULT);
	buf->b_lblkno   = buf->b_blkno = buf->b_rawblkno = sector * blks;
	buf->b_proc     = NULL;

	/* do the write and return no error */
	udf_write_phys_buf(ump, what, buf);
	return 0;
}

/* --------------------------------------------------------------------- */

/* disc strategy dispatchers */
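/*
 * Each dispatcher below only packs its arguments into a struct
 * udf_strat_args and calls through the strategy selected at mount time,
 * roughly (sketch):
 *
 *	args.ump  = ump;
 *	args.icb  = icb;
 *	args.dscr = NULL;
 *	error = (strategy->read_logvol_dscr)(&args);
 *
 * which keeps node descriptor handling and disc scheduling private to the
 * individual strategy backends.
 */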

int
udf_create_logvol_dscr(struct udf_mount *ump, struct udf_node *udf_node, struct long_ad *icb,
	union dscrptr **dscrptr)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;
	int error;

	KASSERT(strategy);
	args.ump  = ump;
	args.udf_node = udf_node;
	args.icb  = icb;
	args.dscr = NULL;

	error = (strategy->create_logvol_dscr)(&args);
	*dscrptr = args.dscr;

	return error;
}


void
udf_free_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
	void *dscr)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	KASSERT(strategy);
	args.ump  = ump;
	args.icb  = icb;
	args.dscr = dscr;

	(strategy->free_logvol_dscr)(&args);
}


int
udf_read_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
	union dscrptr **dscrptr)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;
	int error;

	KASSERT(strategy);
	args.ump  = ump;
	args.icb  = icb;
	args.dscr = NULL;

	error = (strategy->read_logvol_dscr)(&args);
	*dscrptr = args.dscr;

	return error;
}


int
udf_write_logvol_dscr(struct udf_node *udf_node, union dscrptr *dscr,
	struct long_ad *icb, int waitfor)
{
	struct udf_strategy *strategy = udf_node->ump->strategy;
	struct udf_strat_args args;
	int error;

	KASSERT(strategy);
	args.ump      = udf_node->ump;
	args.udf_node = udf_node;
	args.icb      = icb;
	args.dscr     = dscr;
	args.waitfor  = waitfor;

	error = (strategy->write_logvol_dscr)(&args);
	return error;
}


void
udf_discstrat_queuebuf(struct udf_mount *ump, struct buf *nestbuf)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	KASSERT(strategy);
	args.ump = ump;
	args.nestbuf = nestbuf;

	(strategy->queuebuf)(&args);
}


void
udf_discstrat_init(struct udf_mount *ump)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	KASSERT(strategy);
	args.ump = ump;
	(strategy->discstrat_init)(&args);
}


void
udf_discstrat_finish(struct udf_mount *ump)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	/* strategy might not have been set, so ignore if not set */
	if (strategy) {
		args.ump = ump;
		(strategy->discstrat_finish)(&args);
	}
}

/* --------------------------------------------------------------------- */