/* $NetBSD: udf_strat_direct.c,v 1.15 2022/01/15 10:55:53 msaitoh Exp $ */

/*
 * Copyright (c) 2006, 2008 Reinoud Zandijk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#ifndef lint
__KERNEL_RCSID(0, "$NetBSD: udf_strat_direct.c,v 1.15 2022/01/15 10:55:53 msaitoh Exp $");
#endif /* not lint */


#if defined(_KERNEL_OPT)
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <miscfs/genfs/genfs_node.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kthread.h>
#include <dev/clock_subr.h>

#include <fs/udf/ecma167-udf.h>
#include <fs/udf/udf_mount.h>

#include "udf.h"
#include "udf_subr.h"
#include "udf_bswap.h"


#define VTOI(vnode) ((struct udf_node *) vnode->v_data)
#define PRIV(ump) ((struct strat_private *) ump->strategy_private)

/* --------------------------------------------------------------------- */

/* BUFQ's */
#define UDF_SHED_MAX		3

#define UDF_SHED_READING	0
#define UDF_SHED_WRITING	1
#define UDF_SHED_SEQWRITING	2


struct strat_private {
	struct pool desc_pool;		/* node descriptors */
};

/* --------------------------------------------------------------------- */

static void
udf_wr_nodedscr_callback(struct buf *buf)
{
	struct udf_node *udf_node;

	KASSERT(buf);
	KASSERT(buf->b_data);

	/* called when write action is done */
	DPRINTF(WRITE, ("udf_wr_nodedscr_callback(): node written out\n"));

	udf_node = VTOI(buf->b_vp);
	if (udf_node == NULL) {
		putiobuf(buf);
		printf("udf_wr_node_callback: NULL node?\n");
		return;
	}

	/* XXX right flags to mark dirty again on error? */
	if (buf->b_error) {
		/* write error on `defect free' media??? how to solve? */
		/* XXX lookup UDF standard for unallocatable space */
		udf_node->i_flags |= IN_MODIFIED | IN_ACCESSED;
	}

	/* decrement outstanding_nodedscr */
	KASSERT(udf_node->outstanding_nodedscr >= 1);
	udf_node->outstanding_nodedscr--;
	if (udf_node->outstanding_nodedscr == 0) {
		/* unlock the node */
		UDF_UNLOCK_NODE(udf_node, 0);
		wakeup(&udf_node->outstanding_nodedscr);
	}

	putiobuf(buf);
}

/* --------------------------------------------------------------------- */

static int
udf_getblank_nodedscr_direct(struct udf_strat_args *args)
{
	union dscrptr **dscrptr = &args->dscr;
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);
	uint32_t lb_size;

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	*dscrptr = pool_get(&priv->desc_pool, PR_WAITOK);
	memset(*dscrptr, 0, lb_size);

	return 0;
}


static void
udf_free_nodedscr_direct(struct udf_strat_args *args)
{
	union dscrptr *dscr = args->dscr;
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);

	pool_put(&priv->desc_pool, dscr);
}


static int
udf_read_nodedscr_direct(struct udf_strat_args *args)
{
	union dscrptr **dscrptr = &args->dscr;
	union dscrptr *tmpdscr;
	struct udf_mount *ump = args->ump;
	struct long_ad *icb = args->icb;
	struct strat_private *priv = PRIV(ump);
	uint32_t lb_size;
	uint32_t sector, dummy;
	int error;

	lb_size = udf_rw32(ump->logical_vol->lb_size);

	error = udf_translate_vtop(ump, icb, &sector, &dummy);
	if (error)
		return error;

	/* try to read in fe/efe */
	error = udf_read_phys_dscr(ump, sector, M_UDFTEMP, &tmpdscr);
	if (error)
		return error;

	*dscrptr = pool_get(&priv->desc_pool, PR_WAITOK);
	memcpy(*dscrptr, tmpdscr, lb_size);
	free(tmpdscr, M_UDFTEMP);

	return 0;
}


static int
udf_write_nodedscr_direct(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct udf_node *udf_node = args->udf_node;
	union dscrptr *dscr = args->dscr;
	struct long_ad *icb = args->icb;
	int waitfor = args->waitfor;
	uint32_t logsector, sector, dummy;
	int error, vpart __diagused;

	/*
	 * we have to decide if we write it out sequentially or at its fixed
	 * position by examining the partition it is (to be) written on.
	 */
	vpart = udf_rw16(udf_node->loc.loc.part_num);
	logsector = udf_rw32(icb->loc.lb_num);
	KASSERT(ump->vtop_tp[vpart] != UDF_VTOP_TYPE_VIRT);

	sector = 0;
	error = udf_translate_vtop(ump, icb, &sector, &dummy);
	if (error)
		goto out;

	if (waitfor) {
		DPRINTF(WRITE, ("udf_write_nodedscr: sync write\n"));

		error = udf_write_phys_dscr_sync(ump, udf_node, UDF_C_NODE,
			dscr, sector, logsector);
	} else {
		DPRINTF(WRITE, ("udf_write_nodedscr: no wait, async write\n"));

		error = udf_write_phys_dscr_async(ump, udf_node, UDF_C_NODE,
			dscr, sector, logsector, udf_wr_nodedscr_callback);
		/* will be UNLOCKED in call back */
		return error;
	}
out:
	udf_node->outstanding_nodedscr--;
	if (udf_node->outstanding_nodedscr == 0) {
		UDF_UNLOCK_NODE(udf_node, 0);
		wakeup(&udf_node->outstanding_nodedscr);
	}

	return error;
}

/* --------------------------------------------------------------------- */

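/*
 * Scheduling summary: reads are passed straight through to the device,
 * node/descriptor/absolute writes go out at their fixed position, and
 * all remaining writes get their disc addresses allocated late and are
 * then issued as sequential writes.
 */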
static void
udf_queue_buf_direct(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct buf *buf = args->nestbuf;
	struct buf *nestbuf;
	struct desc_tag *tag;
	struct long_ad *node_ad_cpy;
	uint64_t *lmapping, *pmapping, *lmappos, run_start;
	uint32_t sectornr;
	uint32_t buf_offset, rbuflen, bpos;
	uint16_t vpart_num;
	uint8_t *fidblk;
	off_t rblk;
	int sector_size = ump->discinfo.sector_size;
	int len, buf_len, sector, sectors, run_length;
	int blks = sector_size / DEV_BSIZE;
	int what, class __diagused, queue;

	KASSERT(ump);
	KASSERT(buf);
	KASSERT(buf->b_iodone == nestiobuf_iodone);

	what = buf->b_udf_c_type;
	queue = UDF_SHED_READING;
	if ((buf->b_flags & B_READ) == 0) {
		/* writing */
		queue = UDF_SHED_SEQWRITING;
		if (what == UDF_C_ABSOLUTE)
			queue = UDF_SHED_WRITING;
		if (what == UDF_C_DSCR)
			queue = UDF_SHED_WRITING;
		if (what == UDF_C_NODE)
			queue = UDF_SHED_WRITING;
	}

	/* use disc scheduler */
	class = ump->discinfo.mmc_class;
	KASSERT((class == MMC_CLASS_UNKN) || (class == MMC_CLASS_DISC) ||
		(ump->discinfo.mmc_cur & MMC_CAP_HW_DEFECTFREE) ||
		(ump->vfs_mountp->mnt_flag & MNT_RDONLY));

#ifndef UDF_DEBUG
	__USE(blks);
#endif
	if (queue == UDF_SHED_READING) {
		DPRINTF(SHEDULE, ("\nudf_issue_buf READ %p : sector %d type %d,"
			"b_resid %d, b_bcount %d, b_bufsize %d\n",
			buf, (uint32_t) buf->b_blkno / blks, buf->b_udf_c_type,
			buf->b_resid, buf->b_bcount, buf->b_bufsize));
		VOP_STRATEGY(ump->devvp, buf);
		return;
	}


	if (queue == UDF_SHED_WRITING) {
		DPRINTF(SHEDULE, ("\nudf_issue_buf WRITE %p : sector %d "
			"type %d, b_resid %d, b_bcount %d, b_bufsize %d\n",
			buf, (uint32_t) buf->b_blkno / blks, buf->b_udf_c_type,
			buf->b_resid, buf->b_bcount, buf->b_bufsize));
		KASSERT(buf->b_udf_c_type == UDF_C_DSCR ||
			buf->b_udf_c_type == UDF_C_ABSOLUTE ||
			buf->b_udf_c_type == UDF_C_NODE);
		udf_fixup_node_internals(ump, buf->b_data, buf->b_udf_c_type);
		VOP_STRATEGY(ump->devvp, buf);
		return;
	}

	/* UDF_SHED_SEQWRITING */
	KASSERT(queue == UDF_SHED_SEQWRITING);
	DPRINTF(SHEDULE, ("\nudf_issue_buf SEQWRITE %p : sector XXXX "
		"type %d, b_resid %d, b_bcount %d, b_bufsize %d\n",
		buf, buf->b_udf_c_type, buf->b_resid, buf->b_bcount,
		buf->b_bufsize));

	/*
	 * Buffers should not have been allocated to disc addresses yet on
	 * this queue. Note that a buffer can get multiple extents allocated.
	 *
	 * lmapping contains lb_num relative to base partition.
	 */
	lmapping    = ump->la_lmapping;
	node_ad_cpy = ump->la_node_ad_cpy;

	/* logically allocate buf and map it in the file */
	udf_late_allocate_buf(ump, buf, lmapping, node_ad_cpy, &vpart_num);

	/* if we have FIDs, fixup using the new allocation table */
	if (buf->b_udf_c_type == UDF_C_FIDS) {
		buf_len = buf->b_bcount;
		bpos = 0;
		lmappos = lmapping;
		while (buf_len) {
			sectornr = *lmappos++;
			len = MIN(buf_len, sector_size);
			fidblk = (uint8_t *) buf->b_data + bpos;
			udf_fixup_fid_block(fidblk, sector_size,
				0, len, sectornr);
			bpos += len;
			buf_len -= len;
		}
	}
	if (buf->b_udf_c_type == UDF_C_METADATA_SBM) {
		if (buf->b_lblkno == 0) {
			/* update the tag location inside */
			tag = (struct desc_tag *) buf->b_data;
			tag->tag_loc = udf_rw32(*lmapping);
			udf_validate_tag_and_crc_sums(buf->b_data);
		}
	}
	udf_fixup_node_internals(ump, buf->b_data, buf->b_udf_c_type);

	/*
	 * Translate new mappings in lmapping to pmappings and try to
	 * conglomerate extents to reduce the number of writes.
	 *
	 * pmapping will contain lb_nums as used for disc addressing.
	 */
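	/*
	 * Example: a physical mapping of { 100, 101, 102, 200, 201 } results
	 * in two nested buffers below, one covering sectors 100-102 and one
	 * covering 200-201, instead of five single-sector writes.
	 */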
	pmapping = ump->la_pmapping;
	sectors  = (buf->b_bcount + sector_size - 1) / sector_size;
	udf_translate_vtop_list(ump, sectors, vpart_num, lmapping, pmapping);

	for (sector = 0; sector < sectors; sector++) {
		buf_offset = sector * sector_size;
		DPRINTF(WRITE, ("\tprocessing rel sector %d\n", sector));

		DPRINTF(WRITE, ("\tissue write sector %"PRIu64"\n",
			pmapping[sector]));

		run_start  = pmapping[sector];
		run_length = 1;
		while (sector < sectors-1) {
			if (pmapping[sector+1] != pmapping[sector]+1)
				break;
			run_length++;
			sector++;
		}

		/* nest an iobuf for the extent */
		rbuflen = run_length * sector_size;
		rblk    = run_start * (sector_size/DEV_BSIZE);

		nestbuf = getiobuf(NULL, true);
		nestiobuf_setup(buf, nestbuf, buf_offset, rbuflen);
		/* nestbuf is B_ASYNC */

		/* identify this nestbuf */
		nestbuf->b_lblkno = sector;
		assert(nestbuf->b_vp == buf->b_vp);

		/* CD schedules on raw blkno */
		nestbuf->b_blkno      = rblk;
		nestbuf->b_proc       = NULL;
		nestbuf->b_rawblkno   = rblk;
		nestbuf->b_udf_c_type = UDF_C_PROCESSED;

		VOP_STRATEGY(ump->devvp, nestbuf);
	}
}


static void
udf_sync_caches_direct(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;

	udf_mmc_synchronise_caches(ump);
}


static void
udf_discstrat_init_direct(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);
	uint32_t lb_size;

	KASSERT(priv == NULL);
	ump->strategy_private = malloc(sizeof(struct strat_private),
		M_UDFTEMP, M_WAITOK);
	priv = ump->strategy_private;
	memset(priv, 0, sizeof(struct strat_private));

	/*
	 * Initialise pool for descriptors associated with nodes. This is done
	 * in lb_size units though currently lb_size is dictated to be
	 * sector_size.
	 */
	memset(&priv->desc_pool, 0, sizeof(struct pool));

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	pool_init(&priv->desc_pool, lb_size, 0, 0, 0, "udf_desc_pool", NULL,
		IPL_NONE);
}


static void
udf_discstrat_finish_direct(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);

	/* destroy our pool */
	pool_destroy(&priv->desc_pool);

	/* free our private space */
	free(ump->strategy_private, M_UDFTEMP);
	ump->strategy_private = NULL;
}

/* --------------------------------------------------------------------- */

struct udf_strategy udf_strat_direct =
{
	udf_getblank_nodedscr_direct,
	udf_free_nodedscr_direct,
	udf_read_nodedscr_direct,
	udf_write_nodedscr_direct,
	udf_queue_buf_direct,
	udf_sync_caches_direct,
	udf_discstrat_init_direct,
	udf_discstrat_finish_direct
};
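
/*
 * Rough usage sketch (illustrative only; the struct udf_strategy member
 * names and the dispatch glue live in udf.h and udf_subr.c and may differ
 * from the names used here):
 *
 *	struct udf_strat_args args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.ump = ump;
 *	args.icb = icb;
 *	error = ump->strategy->udf_read_nodedscr(&args);
 *	if (error == 0)
 *		dscr = args.dscr;
 */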