/* $NetBSD: udf_strat_direct.c,v 1.12 2013/10/30 08:41:38 mrg Exp $ */

/*
 * Copyright (c) 2006, 2008 Reinoud Zandijk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#ifndef lint
__KERNEL_RCSID(0, "$NetBSD: udf_strat_direct.c,v 1.12 2013/10/30 08:41:38 mrg Exp $");
#endif /* not lint */


#if defined(_KERNEL_OPT)
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <miscfs/genfs/genfs_node.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kthread.h>
#include <dev/clock_subr.h>

#include <fs/udf/ecma167-udf.h>
#include <fs/udf/udf_mount.h>

#include "udf.h"
#include "udf_subr.h"
#include "udf_bswap.h"


#define VTOI(vnode) ((struct udf_node *) vnode->v_data)
#define PRIV(ump) ((struct strat_private *) ump->strategy_private)

/* --------------------------------------------------------------------- */

/* BUFQ's */
#define UDF_SHED_MAX 3

#define UDF_SHED_READING	0
#define UDF_SHED_WRITING	1
#define UDF_SHED_SEQWRITING	2


struct strat_private {
	struct pool	desc_pool;	/* node descriptors */
};

/* --------------------------------------------------------------------- */

static void
udf_wr_nodedscr_callback(struct buf *buf)
{
	struct udf_node *udf_node;

	KASSERT(buf);
	KASSERT(buf->b_data);

	/* called when write action is done */
	DPRINTF(WRITE, ("udf_wr_nodedscr_callback(): node written out\n"));

	udf_node = VTOI(buf->b_vp);
	if (udf_node == NULL) {
		putiobuf(buf);
		printf("udf_wr_node_callback: NULL node?\n");
		return;
	}

	/* XXX right flags to mark dirty again on error? */
	if (buf->b_error) {
		/* write error on `defect free' media??? how to solve? */
		/* XXX lookup UDF standard for unallocatable space */
		udf_node->i_flags |= IN_MODIFIED | IN_ACCESSED;
	}

	/* decrement outstanding_nodedscr */
	KASSERT(udf_node->outstanding_nodedscr >= 1);
	udf_node->outstanding_nodedscr--;
	if (udf_node->outstanding_nodedscr == 0) {
		/* unlock the node */
		UDF_UNLOCK_NODE(udf_node, 0);
		wakeup(&udf_node->outstanding_nodedscr);
	}
	/* unreference the vnode so it can be recycled */
	holdrele(udf_node->vnode);

	putiobuf(buf);
}

/* --------------------------------------------------------------------- */

static int
udf_getblank_nodedscr_direct(struct udf_strat_args *args)
{
	union dscrptr **dscrptr = &args->dscr;
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);
	uint32_t lb_size;

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	*dscrptr = pool_get(&priv->desc_pool, PR_WAITOK);
	memset(*dscrptr, 0, lb_size);

	return 0;
}


static void
udf_free_nodedscr_direct(struct udf_strat_args *args)
{
	union dscrptr *dscr = args->dscr;
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);

	pool_put(&priv->desc_pool, dscr);
}


static int
udf_read_nodedscr_direct(struct udf_strat_args *args)
{
	union dscrptr **dscrptr = &args->dscr;
	union dscrptr *tmpdscr;
	struct udf_mount *ump = args->ump;
	struct long_ad *icb = args->icb;
	struct strat_private *priv = PRIV(ump);
	uint32_t lb_size;
	uint32_t sector, dummy;
	int error;

	lb_size = udf_rw32(ump->logical_vol->lb_size);

	error = udf_translate_vtop(ump, icb, &sector, &dummy);
	if (error)
		return error;

	/* try to read in fe/efe */
	error = udf_read_phys_dscr(ump, sector, M_UDFTEMP, &tmpdscr);
	if (error)
		return error;

	*dscrptr = pool_get(&priv->desc_pool, PR_WAITOK);
	memcpy(*dscrptr, tmpdscr, lb_size);
	free(tmpdscr, M_UDFTEMP);

	return 0;
}


static int
udf_write_nodedscr_direct(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct udf_node *udf_node = args->udf_node;
	union dscrptr *dscr = args->dscr;
	struct long_ad *icb = args->icb;
	int waitfor = args->waitfor;
	uint32_t logsector, sector, dummy;
	int error, vpart __diagused;

	/*
	 * We have to decide if we write it out sequentially or at its fixed
	 * position by examining the partition it is (to be) written on.
	 */
	vpart = udf_rw16(udf_node->loc.loc.part_num);
	logsector = udf_rw32(icb->loc.lb_num);
	KASSERT(ump->vtop_tp[vpart] != UDF_VTOP_TYPE_VIRT);

	sector = 0;
	error = udf_translate_vtop(ump, icb, &sector, &dummy);
	if (error)
		goto out;

	/* add reference to the vnode to prevent recycling */
	vhold(udf_node->vnode);

	if (waitfor) {
		DPRINTF(WRITE, ("udf_write_nodedscr: sync write\n"));

		error = udf_write_phys_dscr_sync(ump, udf_node, UDF_C_NODE,
			dscr, sector, logsector);
	} else {
		DPRINTF(WRITE, ("udf_write_nodedscr: no wait, async write\n"));

		error = udf_write_phys_dscr_async(ump, udf_node, UDF_C_NODE,
			dscr, sector, logsector, udf_wr_nodedscr_callback);
		/* will be UNLOCKED in call back */
		return error;
	}

	holdrele(udf_node->vnode);
out:
	udf_node->outstanding_nodedscr--;
	if (udf_node->outstanding_nodedscr == 0) {
		UDF_UNLOCK_NODE(udf_node, 0);
		wakeup(&udf_node->outstanding_nodedscr);
	}

	return error;
}

/* --------------------------------------------------------------------- */

static void
udf_queue_buf_direct(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct buf *buf = args->nestbuf;
	struct buf *nestbuf;
	struct desc_tag *tag;
	struct long_ad *node_ad_cpy;
	uint64_t *lmapping, *pmapping, *lmappos, run_start;
	uint32_t sectornr;
	uint32_t buf_offset, rbuflen, bpos;
	uint16_t vpart_num;
	uint8_t *fidblk;
	off_t rblk;
	int sector_size = ump->discinfo.sector_size;
	int len, buf_len, sector, sectors, run_length;
	int blks = sector_size / DEV_BSIZE;
	int what, class __diagused, queue;

	KASSERT(ump);
	KASSERT(buf);
	KASSERT(buf->b_iodone == nestiobuf_iodone);

	what = buf->b_udf_c_type;
	queue = UDF_SHED_READING;
	if ((buf->b_flags & B_READ) == 0) {
		/* writing */
		queue = UDF_SHED_SEQWRITING;
		if (what == UDF_C_ABSOLUTE)
			queue = UDF_SHED_WRITING;
		if (what == UDF_C_DSCR)
			queue = UDF_SHED_WRITING;
		if (what == UDF_C_NODE)
			queue = UDF_SHED_WRITING;
	}

	/* use disc scheduler */
	class = ump->discinfo.mmc_class;
	KASSERT((class == MMC_CLASS_UNKN) || (class == MMC_CLASS_DISC) ||
		(ump->discinfo.mmc_cur & MMC_CAP_HW_DEFECTFREE) ||
		(ump->vfs_mountp->mnt_flag & MNT_RDONLY));

#ifndef UDF_DEBUG
	__USE(blks);
#endif
	if (queue == UDF_SHED_READING) {
		DPRINTF(SHEDULE, ("\nudf_issue_buf READ %p : sector %d type %d,"
			"b_resid %d, b_bcount %d, b_bufsize %d\n",
			buf, (uint32_t) buf->b_blkno / blks, buf->b_udf_c_type,
			buf->b_resid, buf->b_bcount, buf->b_bufsize));
		VOP_STRATEGY(ump->devvp, buf);
		return;
	}


	if (queue == UDF_SHED_WRITING) {
		DPRINTF(SHEDULE, ("\nudf_issue_buf WRITE %p : sector %d "
			"type %d, b_resid %d, b_bcount %d, b_bufsize %d\n",
			buf, (uint32_t) buf->b_blkno / blks, buf->b_udf_c_type,
			buf->b_resid, buf->b_bcount, buf->b_bufsize));
		KASSERT(buf->b_udf_c_type == UDF_C_DSCR ||
			buf->b_udf_c_type == UDF_C_ABSOLUTE ||
			buf->b_udf_c_type == UDF_C_NODE);
		udf_fixup_node_internals(ump, buf->b_data, buf->b_udf_c_type);
		VOP_STRATEGY(ump->devvp, buf);
		return;
	}

	/* UDF_SHED_SEQWRITING */
	KASSERT(queue == UDF_SHED_SEQWRITING);
	DPRINTF(SHEDULE, ("\nudf_issue_buf SEQWRITE %p : sector XXXX "
		"type %d, b_resid %d, b_bcount %d, b_bufsize %d\n",
		buf, buf->b_udf_c_type, buf->b_resid, buf->b_bcount,
		buf->b_bufsize));

	/*
	 * Buffers should not have been allocated to disc addresses yet on
	 * this queue. Note that a buffer can get multiple extents allocated.
	 *
	 * lmapping contains lb_num relative to base partition.
	 */
	lmapping = ump->la_lmapping;
	node_ad_cpy = ump->la_node_ad_cpy;

	/* logically allocate buf and map it in the file */
	udf_late_allocate_buf(ump, buf, lmapping, node_ad_cpy, &vpart_num);

	/* if we have FIDs, fixup using the new allocation table */
	if (buf->b_udf_c_type == UDF_C_FIDS) {
		buf_len = buf->b_bcount;
		bpos = 0;
		lmappos = lmapping;
		while (buf_len) {
			sectornr = *lmappos++;
			len = MIN(buf_len, sector_size);
			fidblk = (uint8_t *) buf->b_data + bpos;
			udf_fixup_fid_block(fidblk, sector_size,
				0, len, sectornr);
			bpos += len;
			buf_len -= len;
		}
	}
	if (buf->b_udf_c_type == UDF_C_METADATA_SBM) {
		if (buf->b_lblkno == 0) {
			/* update the tag location inside */
			tag = (struct desc_tag *) buf->b_data;
			tag->tag_loc = udf_rw32(*lmapping);
			udf_validate_tag_and_crc_sums(buf->b_data);
		}
	}
	udf_fixup_node_internals(ump, buf->b_data, buf->b_udf_c_type);

	/*
	 * Translate new mappings in lmapping to pmappings and try to
	 * conglomerate extents to reduce the number of writes.
	 *
	 * pmapping to contain lb_nums as used for disc addressing.
	 */
	pmapping = ump->la_pmapping;
	sectors = (buf->b_bcount + sector_size - 1) / sector_size;
	udf_translate_vtop_list(ump, sectors, vpart_num, lmapping, pmapping);

	for (sector = 0; sector < sectors; sector++) {
		buf_offset = sector * sector_size;
		DPRINTF(WRITE, ("\tprocessing rel sector %d\n", sector));

		DPRINTF(WRITE, ("\tissue write sector %"PRIu64"\n",
			pmapping[sector]));

		run_start = pmapping[sector];
		run_length = 1;
		while (sector < sectors-1) {
			if (pmapping[sector+1] != pmapping[sector]+1)
				break;
			run_length++;
			sector++;
		}

		/* nest an iobuf for the extent */
		rbuflen = run_length * sector_size;
		rblk = run_start * (sector_size/DEV_BSIZE);

		nestbuf = getiobuf(NULL, true);
		nestiobuf_setup(buf, nestbuf, buf_offset, rbuflen);
		/* nestbuf is B_ASYNC */

		/* identify this nestbuf */
		nestbuf->b_lblkno = sector;
		assert(nestbuf->b_vp == buf->b_vp);

		/* CD schedules on raw blkno */
		nestbuf->b_blkno = rblk;
		nestbuf->b_proc = NULL;
		nestbuf->b_rawblkno = rblk;
		nestbuf->b_udf_c_type = UDF_C_PROCESSED;

		VOP_STRATEGY(ump->devvp, nestbuf);
	}
}


static void
udf_discstrat_init_direct(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);
	uint32_t lb_size;

	KASSERT(priv == NULL);
	ump->strategy_private = malloc(sizeof(struct strat_private),
		M_UDFTEMP, M_WAITOK);
	priv = ump->strategy_private;
	memset(priv, 0, sizeof(struct strat_private));

	/*
	 * Initialise pool for descriptors associated with nodes. This is done
	 * in lb_size units though currently lb_size is dictated to be
	 * sector_size.
	 */
	memset(&priv->desc_pool, 0, sizeof(struct pool));

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	pool_init(&priv->desc_pool, lb_size, 0, 0, 0, "udf_desc_pool", NULL,
		IPL_NONE);
}


static void
udf_discstrat_finish_direct(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);

	/* destroy our pool */
	pool_destroy(&priv->desc_pool);

	/* free our private space */
	free(ump->strategy_private, M_UDFTEMP);
	ump->strategy_private = NULL;
}

/* --------------------------------------------------------------------- */

struct udf_strategy udf_strat_direct =
{
	udf_getblank_nodedscr_direct,
	udf_free_nodedscr_direct,
	udf_read_nodedscr_direct,
	udf_write_nodedscr_direct,
	udf_queue_buf_direct,
	udf_discstrat_init_direct,
	udf_discstrat_finish_direct
};