/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License.  See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * such as drivers/scsi/sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom ->submit_bio function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/
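/*
 * Illustrative usage (a sketch, not part of the driver; the exact
 * tooling is an assumption): a writer is mapped on top of the CD/DVD
 * device, e.g. with pktsetup from udftools or via the sysfs "add"
 * attribute below, and the resulting packet device is then mounted
 * with a packet-aware file system such as UDF:
 *
 *	pktsetup dvd /dev/sr0
 *	mount -t udf /dev/pktcdvd/dvd /mnt
 */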
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/uaccess.h>

#define DRIVER_NAME	"pktcdvd"

#define MAX_SPEED	0xffff

static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t psd_pool;
static struct bio_set pkt_bio_set;

/* /sys/class/pktcdvd */
static struct class class_pktcdvd;
static struct dentry *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */

/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);

static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}

/**********************************************************
 * sysfs interface for pktcdvd
 * by (C) 2006  Thomas Maier <balagi@justmail.de>

  /sys/class/pktcdvd/pktcdvd[0-7]/
                     stat/reset
                     stat/packets_started
                     stat/packets_finished
                     stat/kb_written
                     stat/kb_read
                     stat/kb_read_gather
                     write_queue/size
                     write_queue/congestion_off
                     write_queue/congestion_on
 **********************************************************/
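/*
 * Example (illustrative; "pktcdvd0" assumes the first mapped writer):
 *
 *	cat /sys/class/pktcdvd/pktcdvd0/stat/kb_written
 *	echo 1 > /sys/class/pktcdvd/pktcdvd0/stat/reset
 *
 * Any non-empty write to stat/reset clears all five counters at once;
 * see reset_store() below.
 */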
static ssize_t packets_started_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
}
static DEVICE_ATTR_RO(packets_started);

static ssize_t packets_finished_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
}
static DEVICE_ATTR_RO(packets_finished);

static ssize_t kb_written_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
}
static DEVICE_ATTR_RO(kb_written);

static ssize_t kb_read_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
}
static DEVICE_ATTR_RO(kb_read);

static ssize_t kb_read_gather_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
}
static DEVICE_ATTR_RO(kb_read_gather);

static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);

	if (len > 0) {
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;
	}
	return len;
}
static DEVICE_ATTR_WO(reset);

static struct attribute *pkt_stat_attrs[] = {
	&dev_attr_packets_finished.attr,
	&dev_attr_packets_started.attr,
	&dev_attr_kb_read.attr,
	&dev_attr_kb_written.attr,
	&dev_attr_kb_read_gather.attr,
	&dev_attr_reset.attr,
	NULL,
};

static const struct attribute_group pkt_stat_group = {
	.name = "stat",
	.attrs = pkt_stat_attrs,
};

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int n;

	spin_lock(&pd->lock);
	n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
	spin_unlock(&pd->lock);
	return n;
}
static DEVICE_ATTR_RO(size);

static void init_write_congestion_marks(int* lo, int* hi)
{
	if (*hi > 0) {
		*hi = max(*hi, 500);
		*hi = min(*hi, 1000000);
		if (*lo <= 0)
			*lo = *hi - 100;
		else {
			*lo = min(*lo, *hi - 100);
			*lo = max(*lo, 100);
		}
	} else {
		*hi = -1;
		*lo = -1;
	}
}
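/*
 * Worked example for init_write_congestion_marks(): writing 1000 to
 * write_queue/congestion_on while congestion_off is unset (<= 0)
 * leaves hi == 1000 (already within [500, 1000000]) and derives
 * lo == hi - 100 == 900. Requesting hi == 100 would first be raised
 * to the 500 minimum, and lo is always kept at least 100 below hi.
 */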
static ssize_t congestion_off_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int n;

	spin_lock(&pd->lock);
	n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
	spin_unlock(&pd->lock);
	return n;
}

static ssize_t congestion_off_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t len)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int val, ret;

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		return ret;

	spin_lock(&pd->lock);
	pd->write_congestion_off = val;
	init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
	spin_unlock(&pd->lock);
	return len;
}
static DEVICE_ATTR_RW(congestion_off);

static ssize_t congestion_on_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int n;

	spin_lock(&pd->lock);
	n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
	spin_unlock(&pd->lock);
	return n;
}

static ssize_t congestion_on_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t len)
{
	struct pktcdvd_device *pd = dev_get_drvdata(dev);
	int val, ret;

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		return ret;

	spin_lock(&pd->lock);
	pd->write_congestion_on = val;
	init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
	spin_unlock(&pd->lock);
	return len;
}
static DEVICE_ATTR_RW(congestion_on);

static struct attribute *pkt_wq_attrs[] = {
	&dev_attr_congestion_on.attr,
	&dev_attr_congestion_off.attr,
	&dev_attr_size.attr,
	NULL,
};

static const struct attribute_group pkt_wq_group = {
	.name = "write_queue",
	.attrs = pkt_wq_attrs,
};

static const struct attribute_group *pkt_groups[] = {
	&pkt_stat_group,
	&pkt_wq_group,
	NULL,
};

static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
	if (class_is_registered(&class_pktcdvd)) {
		pd->dev = device_create_with_groups(&class_pktcdvd, NULL,
						    MKDEV(0, 0), pd, pkt_groups,
						    "%s", pd->disk->disk_name);
		if (IS_ERR(pd->dev))
			pd->dev = NULL;
	}
}

static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	if (class_is_registered(&class_pktcdvd))
		device_unregister(pd->dev);
}


/********************************************************************
  /sys/class/pktcdvd/
                     add            map block device
                     remove         unmap packet dev
                     device_map     show mappings
 *******************************************************************/

static ssize_t device_map_show(const struct class *c, const struct class_attribute *attr,
			       char *data)
{
	int n = 0;
	int idx;
	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	for (idx = 0; idx < MAX_WRITERS; idx++) {
		struct pktcdvd_device *pd = pkt_devs[idx];
		if (!pd)
			continue;
		n += sysfs_emit_at(data, n, "%s %u:%u %u:%u\n",
			pd->disk->disk_name,
			MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
			MAJOR(pd->bdev->bd_dev),
			MINOR(pd->bdev->bd_dev));
	}
	mutex_unlock(&ctl_mutex);
	return n;
}
static CLASS_ATTR_RO(device_map);

static ssize_t add_store(const struct class *c, const struct class_attribute *attr,
			 const char *buf, size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		/* pkt_setup_dev() expects caller to hold reference to self */
		if (!try_module_get(THIS_MODULE))
			return -ENODEV;

		pkt_setup_dev(MKDEV(major, minor), NULL);

		module_put(THIS_MODULE);

		return count;
	}

	return -EINVAL;
}
static CLASS_ATTR_WO(add);

static ssize_t remove_store(const struct class *c, const struct class_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned int major, minor;
	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		pkt_remove_dev(MKDEV(major, minor));
		return count;
	}
	return -EINVAL;
}
static CLASS_ATTR_WO(remove);

static struct attribute *class_pktcdvd_attrs[] = {
	&class_attr_add.attr,
	&class_attr_remove.attr,
	&class_attr_device_map.attr,
	NULL,
};
ATTRIBUTE_GROUPS(class_pktcdvd);

static struct class class_pktcdvd = {
	.name		= DRIVER_NAME,
	.class_groups	= class_pktcdvd_groups,
};
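/*
 * Example (illustrative; the device numbers are made up -- 11:0 is
 * commonly the first SCSI CD-ROM, and the pktcdvd major is assigned
 * dynamically):
 *
 *	echo "11:0" > /sys/class/pktcdvd/add
 *	cat /sys/class/pktcdvd/device_map
 *	pktcdvd0 254:0 11:0
 *	echo "254:0" > /sys/class/pktcdvd/remove
 *
 * Note that "add" takes the underlying block device number, while
 * "remove" takes the packet device number reported by device_map.
 */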
static int pkt_sysfs_init(void)
{
	/*
	 * create control files in sysfs
	 * /sys/class/pktcdvd/...
	 */
	return class_register(&class_pktcdvd);
}

static void pkt_sysfs_cleanup(void)
{
	class_unregister(&class_pktcdvd);
}

/********************************************************************
  entries in debugfs

  /sys/kernel/debug/pktcdvd[0-7]/
			info

 *******************************************************************/

static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
	struct packet_data *pkt;
	int i;

	for (i = 0; i < PACKET_NUM_STATES; i++)
		states[i] = 0;

	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		states[pkt->state]++;
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

static int pkt_seq_show(struct seq_file *m, void *p)
{
	struct pktcdvd_device *pd = m->private;
	char *msg;
	int states[PACKET_NUM_STATES];

	seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name, pd->bdev);

	seq_printf(m, "\nSettings:\n");
	seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);

	if (pd->settings.write_type == 0)
		msg = "Packet";
	else
		msg = "Unknown";
	seq_printf(m, "\twrite type:\t\t%s\n", msg);

	seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
	seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);

	seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);

	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
		msg = "Mode 1";
	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
		msg = "Mode 2";
	else
		msg = "Unknown";
	seq_printf(m, "\tblock mode:\t\t%s\n", msg);

	seq_printf(m, "\nStatistics:\n");
	seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
	seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
	seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
	seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
	seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);

	seq_printf(m, "\nMisc:\n");
	seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
	seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
	seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
	seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
	seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
	seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);

	seq_printf(m, "\nQueue state:\n");
	seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
	seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
	seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);

	pkt_count_states(pd, states);
	seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
		   states[0], states[1], states[2], states[3], states[4], states[5]);

	seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
		   pd->write_congestion_off,
		   pd->write_congestion_on);
	return 0;
}

static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
{
	return pkt_seq_show(m, p);
}

static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_debugfs_seq_show, inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= pkt_debugfs_fops_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};

static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	pd->dfs_d_root = debugfs_create_dir(pd->disk->disk_name, pkt_debugfs_root);
	if (!pd->dfs_d_root)
		return;

	pd->dfs_f_info = debugfs_create_file("info", 0444,
					     pd->dfs_d_root, pd, &debug_fops);
}

static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	debugfs_remove(pd->dfs_f_info);
	debugfs_remove(pd->dfs_d_root);
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = NULL;
}

static void pkt_debugfs_init(void)
{
	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
}

static void pkt_debugfs_cleanup(void)
{
	debugfs_remove(pkt_debugfs_root);
	pkt_debugfs_root = NULL;
}
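/*
 * Example (illustrative): the per-device info file renders the same
 * snapshot as pkt_seq_show() above:
 *
 *	cat /sys/kernel/debug/pktcdvd/pktcdvd0/info
 */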
/* ----------------------------------------------------------*/


static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	struct device *ddev = disk_to_dev(pd->disk);

	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		dev_dbg(ddev, "queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}

/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL);
	if (!pkt->w_bio)
		goto no_bio;

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
			goto no_page;
	}

	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);

	for (i = 0; i < frames; i++) {
		pkt->r_bios[i] = bio_kmalloc(1, GFP_KERNEL);
		if (!pkt->r_bios[i])
			goto no_rd_bio;
	}

	return pkt;

no_rd_bio:
	for (i = 0; i < frames; i++)
		kfree(pkt->r_bios[i]);
no_page:
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	kfree(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}

/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
	int i;

	for (i = 0; i < pkt->frames; i++)
		kfree(pkt->r_bios[i]);
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	kfree(pkt->w_bio);
	kfree(pkt);
}

static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}

static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
	struct packet_data *pkt;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	while (nr_packets > 0) {
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
	}
	return 1;
}
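/*
 * Sizing example (assumes 4kB pages, i.e. FRAMES_PER_PAGE == 2): a
 * 64kB fixed packet gives settings.size == 128 (512-byte sectors), so
 * pkt_grow_pktlist() allocates packets with frames == 32, each backed
 * by 16 zeroed pages, one write bio and 32 single-frame read bios.
 */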
static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
	struct rb_node *n = rb_next(&node->rb_node);
	if (!n)
		return NULL;
	return rb_entry(n, struct pkt_rb_node, rb_node);
}

static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, &pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}

/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_iter.bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	if (s > tmp->bio->bi_iter.bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_iter.bi_sector);
	return tmp;
}

/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_iter.bi_sector;
	struct pkt_rb_node *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_iter.bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
}
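/*
 * Usage note: pkt_rbtree_find() returns the first queued bio whose
 * starting sector is >= s, or NULL if no such node exists;
 * pkt_handle_queue() below resumes scanning at pd->current_sector and
 * wraps around via rb_first(), so zones are serviced in roughly
 * ascending sector order.
 */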
/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	struct request_queue *q = bdev_get_queue(pd->bdev);
	struct scsi_cmnd *scmd;
	struct request *rq;
	int ret = 0;

	rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
			REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	scmd = blk_mq_rq_to_pdu(rq);

	if (cgc->buflen) {
		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
				      GFP_NOIO);
		if (ret)
			goto out;
	}

	scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
	memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE);

	rq->timeout = 60*HZ;
	if (cgc->quiet)
		rq->rq_flags |= RQF_QUIET;

	blk_execute_rq(rq, false);
	if (scmd->result)
		ret = -EIO;
out:
	blk_mq_free_request(rq);
	return ret;
}

static const char *sense_key_string(__u8 index)
{
	static const char * const info[] = {
		"No sense", "Recovered error", "Not ready",
		"Medium error", "Hardware error", "Illegal request",
		"Unit attention", "Data protect", "Blank check",
	};

	return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
}

/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct pktcdvd_device *pd,
			   struct packet_command *cgc)
{
	struct device *ddev = disk_to_dev(pd->disk);
	struct scsi_sense_hdr *sshdr = cgc->sshdr;

	if (sshdr)
		dev_err(ddev, "%*ph - sense %02x.%02x.%02x (%s)\n",
			CDROM_PACKET_SIZE, cgc->cmd,
			sshdr->sense_key, sshdr->asc, sshdr->ascq,
			sense_key_string(sshdr->sense_key));
	else
		dev_err(ddev, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}

/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * the IMMED bit -- we default to not setting it, although that
	 * would allow a much faster close, this is safer
	 */
#if 0
	cgc.cmd[1] = 1 << 1;
#endif
	return pkt_generic_packet(pd, &cgc);
}

/*
 * speeds are given in kB/s, the unit GPCMD_SET_SPEED expects; callers
 * convert from the "Nx" factor beforehand (1x CD is roughly 176 kB/s)
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
				unsigned write_speed, unsigned read_speed)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sshdr = &sshdr;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	cgc.cmd[2] = (read_speed >> 8) & 0xff;
	cgc.cmd[3] = read_speed & 0xff;
	cgc.cmd[4] = (write_speed >> 8) & 0xff;
	cgc.cmd[5] = write_speed & 0xff;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		pkt_dump_sense(pd, &cgc);

	return ret;
}
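/*
 * Example: a 4x CD write speed is requested as roughly 4 * 177 == 708
 * kB/s in cmd[4..5], while MAX_SPEED (0xffff) asks the drive for its
 * maximum; see the 16 * 177 fallback and the "/ 176" factor printout
 * in pkt_open_write() below.
 */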
/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
	spin_lock(&pd->iosched.lock);
	if (bio_data_dir(bio) == READ)
		bio_list_add(&pd->iosched.read_queue, bio);
	else
		bio_list_add(&pd->iosched.write_queue, bio);
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
}

/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that streaming
 *   writes will never be interrupted by a read, but if the drive has to seek
 *   before the next write, switch to reading instead if there are any pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{
	struct device *ddev = disk_to_dev(pd->disk);

	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_iter.bi_sector ==
				    pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					dev_dbg(ddev, "write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					dev_dbg(ddev, "read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads +=
				bio->bi_iter.bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio_end_sector(bio);
		}
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		submit_bio_noacct(bio);
	}
}
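/*
 * Walk-through of the rules above: while iosched.writing is set, a
 * queued read is serviced only once the next write would need a seek
 * (its sector differs from iosched.last_write) and all in-flight bios
 * have completed, with the cache flushed before the switch. In the
 * other direction, HI_SPEED_SWITCH kB of successive reads raise the
 * read speed to MAX_SPEED, and the first write re-pairs the read and
 * write speeds.
 */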
/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
	struct device *ddev = disk_to_dev(pd->disk);

	if ((pd->settings.size << 9) / CD_FRAMESIZE
	    <= queue_max_segments(q)) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else if ((pd->settings.size << 9) / PAGE_SIZE
		   <= queue_max_segments(q)) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else {
		dev_err(ddev, "cdrom max_phys_segments too small\n");
		return -EIO;
	}
}

static void pkt_end_io_read(struct bio *bio)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	dev_dbg(disk_to_dev(pd->disk), "bio=%p sec0=%llx sec=%llx err=%d\n",
		bio, (unsigned long long)pkt->sector,
		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);

	if (bio->bi_status)
		atomic_inc(&pkt->io_errors);
	bio_uninit(bio);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
}

static void pkt_end_io_packet_write(struct bio *bio)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	dev_dbg(disk_to_dev(pd->disk), "id=%d, err=%d\n", pkt->id, bio->bi_status);

	pd->stats.pkt_ended++;

	bio_uninit(bio);
	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
}
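/*
 * Completion accounting, for reference: pkt_gather_data() below bumps
 * pkt->io_wait once per scheduled read frame, so the final
 * pkt_end_io_read() completion is the one that increments run_sm and
 * wakes kcdrwd; the single write bio behaves the same way, with
 * io_wait set to 1 in pkt_start_write().
 */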
/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	struct device *ddev = disk_to_dev(pd->disk);
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
			(CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		dev_dbg(ddev, "zone %llx cached\n", (unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;

		if (written[f])
			continue;

		bio = pkt->r_bios[f];
		bio_init(bio, pd->bdev, bio->bi_inline_vecs, 1, REQ_OP_READ);
		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		dev_dbg(ddev, "Adding frame %d, page:%p offs:%d\n", f,
			pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	dev_dbg(ddev, "need %d frames for zone %llx\n", frames_read,
		(unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}

/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;
}

static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	} else {
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
	}
}

static inline void pkt_set_state(struct device *ddev, struct packet_data *pkt,
				 enum packet_data_state state)
{
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;

	dev_dbg(ddev, "pkt %2d : s=%6llx %s -> %s\n",
		pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);

	pkt->state = state;
}
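/*
 * Packet life cycle, as driven by pkt_run_state_machine() below:
 * IDLE -> WAITING (collect bios for the zone until the packet fills
 * or sleep_time expires) -> READ_WAIT (gather missing frames) ->
 * WRITE_WAIT -> FINISHED, with read or write errors diverting to
 * RECOVERY, which currently cannot recover and simply finishes the
 * packet with an error status.
 */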
/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct device *ddev = disk_to_dev(pd->disk);
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;

	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		dev_dbg(ddev, "no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = get_zone(bio->bi_iter.bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		dev_dbg(ddev, "no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	dev_dbg(ddev, "looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		sector_t tmp = get_zone(node->bio->bi_iter.bi_sector, pd);

		bio = node->bio;
		dev_dbg(ddev, "found zone=%llx\n", (unsigned long long)tmp);
		if (tmp != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		bio_list_add(&pkt->orig_bios, bio);
		pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	/* check write congestion marks, and if bio_queue_size is
	 * below, wake up any waiters
	 */
	if (pd->congested &&
	    pd->bio_queue_size <= pd->write_congestion_off) {
		pd->congested = false;
		wake_up_var(&pd->congested);
	}
	spin_unlock(&pd->lock);

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(ddev, pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}

/**
 * bio_list_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @src: source bio list
 * @dst: destination bio list
 *
 * Stops when it reaches the end of either the @src list or @dst list - that is,
 * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
 * bios).
 */
static void bio_list_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
	}
}
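/*
 * Example: with a source chain carrying 6kB in total and a 32-frame
 * (64kB) write bio, bio_list_copy_data() copies the 6kB and stops at
 * the end of the source chain; pkt_start_write() relies on the frames
 * not covered by orig_bios having been filled into the same pages by
 * pkt_gather_data().
 */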
/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	struct device *ddev = disk_to_dev(pd->disk);
	int f;

	bio_init(pkt->w_bio, pd->bdev, pkt->w_bio->bi_inline_vecs, pkt->frames,
		 REQ_OP_WRITE);
	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;

	/* XXX: locking? */
	for (f = 0; f < pkt->frames; f++) {
		struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;

		if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
			BUG();
	}
	dev_dbg(ddev, "vcnt=%d\n", pkt->w_bio->bi_vcnt);

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	spin_lock(&pkt->lock);
	bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);

	pkt_set_state(ddev, pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	dev_dbg(ddev, "Writing %d frames for zone %llx\n", pkt->write_size,
		(unsigned long long)pkt->sector);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
		pkt->cache_valid = 1;
	else
		pkt->cache_valid = 0;

	/* Start the write request */
	atomic_set(&pkt->io_wait, 1);
	pkt_queue_bio(pd, pkt->w_bio);
}

static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
{
	struct bio *bio;

	if (status)
		pkt->cache_valid = 0;

	/* Finish all bios corresponding to this packet */
	while ((bio = bio_list_pop(&pkt->orig_bios))) {
		bio->bi_status = status;
		bio_endio(bio);
	}
}

static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	struct device *ddev = disk_to_dev(pd->disk);

	dev_dbg(ddev, "pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			pkt_gather_data(pd, pkt);
			pkt_set_state(ddev, pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (!pkt->w_bio->bi_status) {
				pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			dev_dbg(ddev, "No recovery possible\n");
			pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE);
			break;

		case PACKET_FINISHED_STATE:
			pkt_finish_packet(pkt, pkt->w_bio->bi_status);
			return;

		default:
			BUG();
			break;
		}
	}
}
static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct device *ddev = disk_to_dev(pd->disk);
	struct packet_data *pkt, *next;

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(ddev, pkt, PACKET_IDLE_STATE);
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct device *ddev = disk_to_dev(pd->disk);
	struct packet_data *pkt;
	int states[PACKET_NUM_STATES];
	long min_sleep_time, residue;

	set_user_nice(current, MIN_NICE);
	set_freezable();

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			pkt_count_states(pd, states);
			dev_dbg(ddev, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
				states[0], states[1], states[2], states[3], states[4], states[5]);

			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			dev_dbg(ddev, "sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			dev_dbg(ddev, "wake up\n");

			/* make swsusp happy with our thread */
			try_to_freeze();

			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another request.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}
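/*
 * Sleep accounting example for kcdrwd() above: with two WAITING
 * packets whose sleep_time is 3 and 5 jiffies, the thread sleeps for
 * min_sleep_time == 3. If it is woken early with residue == 1, each
 * sleep_time is reduced by the 2 jiffies that actually elapsed, and a
 * packet reaching 0 gets run_sm bumped so its state machine runs on
 * the next pass.
 */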
static void pkt_print_settings(struct pktcdvd_device *pd)
{
	dev_info(disk_to_dev(pd->disk), "%s packets, %u blocks, Mode-%c disc\n",
		 pd->settings.fp ? "Fixed" : "Variable",
		 pd->settings.size >> 2,
		 pd->settings.block_mode == 8 ? '1' : '2');
}

static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));

	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
	cgc->cmd[2] = page_code | (page_control << 6);
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_READ;
	return pkt_generic_packet(pd, cgc);
}

static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
	cgc->cmd[1] = 0x10;		/* PF */
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_WRITE;
	return pkt_generic_packet(pd, cgc);
}

static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
	struct packet_command cgc;
	int ret;

	/* set up command and get the disc info */
	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[8] = cgc.buflen = 2;
	cgc.quiet = 1;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		return ret;

	/* not all drives have the same disc_info length, so requeue
	 * packet with the length the drive tells us it can supply
	 */
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);

	if (cgc.buflen > sizeof(disc_information))
		cgc.buflen = sizeof(disc_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}

static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
	struct packet_command cgc;
	int ret;

	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
	cgc.cmd[1] = type & 3;
	cgc.cmd[4] = (track & 0xff00) >> 8;
	cgc.cmd[5] = track & 0xff;
	cgc.cmd[8] = 8;
	cgc.quiet = 1;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		return ret;

	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);

	if (cgc.buflen > sizeof(track_information))
		cgc.buflen = sizeof(track_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}

static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
						   long *last_written)
{
	disc_information di;
	track_information ti;
	__u32 last_track;
	int ret;

	ret = pkt_get_disc_info(pd, &di);
	if (ret)
		return ret;

	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
	ret = pkt_get_track_info(pd, last_track, 1, &ti);
	if (ret)
		return ret;

	/* if this track is blank, try the previous. */
	if (ti.blank) {
		last_track--;
		ret = pkt_get_track_info(pd, last_track, 1, &ti);
		if (ret)
			return ret;
	}

	/* if last recorded field is valid, return it. */
	if (ti.lra_v) {
		*last_written = be32_to_cpu(ti.last_rec_address);
	} else {
		/* make it up instead */
		*last_written = be32_to_cpu(ti.track_start) +
				be32_to_cpu(ti.track_size);
		if (ti.free_blocks)
			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
	}
	return 0;
}
/*
 * write mode select package based on pd->settings
 */
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
	struct device *ddev = disk_to_dev(pd->disk);
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW or DVD-RAM */
	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
		return 0;

	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		/*
		 * paranoia
		 */
		dev_err(ddev, "write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
}
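/*
 * Worked example for the write parameters page above: with a 64kB
 * fixed packet, settings.size is 128 (512-byte sectors), so
 * wp->packet_size becomes cpu_to_be32(32) -- the packet length in 2kB
 * frames, which is the unit the drive expects.
 */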
/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
	struct device *ddev = disk_to_dev(pd->disk);

	switch (pd->mmc3_profile) {
	case 0x1a: /* DVD+RW */
	case 0x12: /* DVD-RAM */
		/* The track is always writable on DVD+RW/DVD-RAM */
		return 1;
	default:
		break;
	}

	if (!ti->packet || !ti->fp)
		return 0;

	/*
	 * "good" settings as per Mt Fuji.
	 */
	if (ti->rt == 0 && ti->blank == 0)
		return 1;

	if (ti->rt == 0 && ti->blank == 1)
		return 1;

	if (ti->rt == 1 && ti->blank == 0)
		return 1;

	dev_err(ddev, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
	return 0;
}

/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
	struct device *ddev = disk_to_dev(pd->disk);

	switch (pd->mmc3_profile) {
	case 0x0a: /* CD-RW */
	case 0xffff: /* MMC3 not supported */
		break;
	case 0x1a: /* DVD+RW */
	case 0x13: /* DVD-RW */
	case 0x12: /* DVD-RAM */
		return 1;
	default:
		dev_dbg(ddev, "Wrong disc profile (%x)\n", pd->mmc3_profile);
		return 0;
	}

	/*
	 * for disc type 0xff we should probably reserve a new track.
	 * but i'm not sure, should we leave this to user apps? probably.
	 */
	if (di->disc_type == 0xff) {
		dev_notice(ddev, "unknown disc - no track?\n");
		return 0;
	}

	if (di->disc_type != 0x20 && di->disc_type != 0) {
		dev_err(ddev, "wrong disc type (%x)\n", di->disc_type);
		return 0;
	}

	if (di->erasable == 0) {
		dev_err(ddev, "disc not erasable\n");
		return 0;
	}

	if (di->border_status == PACKET_SESSION_RESERVED) {
		dev_err(ddev, "can't write to last track (reserved)\n");
		return 0;
	}

	return 1;
}
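/*
 * Worked example for pkt_probe_settings() below: a drive reporting a
 * fixed_packet_size of 16 (2kB frames) yields settings.size == 64
 * 512-byte sectors, i.e. 32kB zones, and pkt->frames is then
 * settings.size >> 2 == 16 (cross-checked by the BUG_ON() in
 * pkt_handle_queue()).
 */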
static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
	struct device *ddev = disk_to_dev(pd->disk);
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	ret = pkt_get_disc_info(pd, &di);
	if (ret) {
		dev_err(ddev, "failed get_disc\n");
		return ret;
	}

	if (!pkt_writable_disc(pd, &di))
		return -EROFS;

	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	ret = pkt_get_track_info(pd, track, 1, &ti);
	if (ret) {
		dev_err(ddev, "failed get_track\n");
		return ret;
	}

	if (!pkt_writable_track(pd, &ti)) {
		dev_err(ddev, "can't write to this track\n");
		return -EROFS;
	}

	/*
	 * we keep packet size in 512 byte units, makes it easier to
	 * deal with request calculations.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		dev_notice(ddev, "detected zero packet size!\n");
		return -ENXIO;
	}
	if (pd->settings.size > PACKET_MAX_SECTORS) {
		dev_err(ddev, "packet size is too big\n");
		return -EROFS;
	}
	pd->settings.fp = ti.fp;
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);

	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	/*
	 * fine for now
	 */
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
	case PACKET_MODE1:
		pd->settings.block_mode = PACKET_BLOCK_MODE1;
		break;
	case PACKET_MODE2:
		pd->settings.block_mode = PACKET_BLOCK_MODE2;
		break;
	default:
		dev_err(ddev, "unknown data mode\n");
		return -EROFS;
	}
	return 0;
}

/*
 * enable/disable write caching on drive
 */
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd)
{
	struct device *ddev = disk_to_dev(pd->disk);
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	unsigned char buf[64];
	bool set = IS_ENABLED(CONFIG_CDROM_PKTCDVD_WCACHE);
	int ret;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	cgc.buflen = pd->mode_offset + 12;

	/*
	 * caching mode page might not be there, so quiet this command
	 */
	cgc.quiet = 1;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
	if (ret)
		return ret;

	/*
	 * use drive write caching -- we need deferred error handling to be
	 * able to successfully recover with this option (drive will return good
	 * status as soon as the cdb is validated).
	 */
	buf[pd->mode_offset + 10] |= (set << 2);

	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		dev_err(ddev, "write caching control failed\n");
		pkt_dump_sense(pd, &cgc);
	} else if (!ret && set)
		dev_notice(ddev, "enabled write caching\n");
	return ret;
}
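/*
 * Note on pkt_write_caching() above (offset derivation, for
 * reference): mode_offset holds the block descriptor length from the
 * MODE SENSE(10) header, so the caching mode page starts at
 * mode_offset + 8 and "(set << 2)" toggles the WCE (write cache
 * enable) bit in byte 2 of that page, i.e. buf[mode_offset + 10].
 */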

/*
 * enable/disable write caching on drive
 */
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd)
{
	struct device *ddev = disk_to_dev(pd->disk);
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	unsigned char buf[64];
	bool set = IS_ENABLED(CONFIG_CDROM_PKTCDVD_WCACHE);
	int ret;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	cgc.buflen = pd->mode_offset + 12;

	/*
	 * The caching mode page might not be there, so quiet this command.
	 */
	cgc.quiet = 1;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
	if (ret)
		return ret;

	/*
	 * Use drive write caching -- we need deferred error handling to be
	 * able to successfully recover with this option (the drive will
	 * return good status as soon as the cdb is validated).
	 */
	buf[pd->mode_offset + 10] |= (set << 2);

	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		dev_err(ddev, "write caching control failed\n");
		pkt_dump_sense(pd, &cgc);
	} else if (set)
		dev_notice(ddev, "enabled write caching\n");
	return ret;
}

static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cgc.cmd[4] = lockflag ? 1 : 0;
	return pkt_generic_packet(pd, &cgc);
}

/*
 * Returns drive maximum write speed
 */
static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
						unsigned *write_speed)
{
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	unsigned char buf[256+18];
	unsigned char *cap_buf;
	int ret, offset;

	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
	cgc.sshdr = &sshdr;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
	if (ret) {
		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
			     sizeof(struct mode_page_header);
		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
		if (ret) {
			pkt_dump_sense(pd, &cgc);
			return ret;
		}
	}

	offset = 20;	/* Obsoleted field, used by older drives */
	if (cap_buf[1] >= 28)
		offset = 28;	/* Current write speed selected */
	if (cap_buf[1] >= 30) {
		/*
		 * If the drive reports at least one "Logical Unit Write
		 * Speed Performance Descriptor Block", use the information
		 * in the first block, which contains the highest speed.
		 */
		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
		if (num_spdb > 0)
			offset = 34;
	}

	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
	return 0;
}

/* These tables are from cdrecord - I don't have the Orange Book */
/* standard speed CD-RW (1-4x) */
static char clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* high speed CD-RW (up to 10x) */
static char hs_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* ultra high speed CD-RW */
static char us_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 8, 0, 0, 16, 0, 24, 32, 40, 48, 0, 0, 0, 0
};

/*
 * reads the maximum media speed from ATIP
 */
static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
					      unsigned *speed)
{
	struct device *ddev = disk_to_dev(pd->disk);
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	unsigned char buf[64];
	unsigned int size, st, sp;
	int ret;

	init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4; /* READ ATIP */
	cgc.cmd[8] = 2;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}
	size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
	if (size > sizeof(buf))
		size = sizeof(buf);

	init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
	cgc.sshdr = &sshdr;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4;
	cgc.cmd[8] = size;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	if (!(buf[6] & 0x40)) {
		dev_notice(ddev, "disc type is not CD-RW\n");
		return 1;
	}
	if (!(buf[6] & 0x4)) {
		dev_notice(ddev, "A1 values on media are not valid, maybe not CDRW?\n");
		return 1;
	}

	st = (buf[6] >> 3) & 0x7; /* disc sub-type */

	sp = buf[16] & 0xf; /* max speed from ATIP A1 field */

	/* Info from cdrecord */
	switch (st) {
	case 0: /* standard speed */
		*speed = clv_to_speed[sp];
		break;
	case 1: /* high speed */
		*speed = hs_clv_to_speed[sp];
		break;
	case 2: /* ultra high speed */
		*speed = us_clv_to_speed[sp];
		break;
	default:
		dev_notice(ddev, "unknown disc sub-type %d\n", st);
		return 1;
	}
	if (*speed) {
		dev_info(ddev, "maximum media speed: %d\n", *speed);
		return 0;
	} else {
		dev_notice(ddev, "unknown speed %d for sub-type %d\n", sp, st);
		return 1;
	}
}
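
/*
 * Worked example of the ATIP decoding above (illustrative values): for
 * byte 6 equal to 0x56, bit 0x40 is set (CD-RW), bit 0x04 is set (A1
 * valid) and the sub-type is (0x56 >> 3) & 0x7 = 2, i.e. ultra high
 * speed. With an A1 speed nibble of sp = 8, the table lookup gives
 *
 *	us_clv_to_speed[8] = 24  ->  maximum media speed 24x
 *
 * A zero table entry means the nibble is reserved/unknown, and the
 * function reports failure rather than guessing.
 */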

static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
{
	struct device *ddev = disk_to_dev(pd->disk);
	struct packet_command cgc;
	struct scsi_sense_hdr sshdr;
	int ret;

	dev_dbg(ddev, "Performing OPC\n");

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sshdr = &sshdr;
	cgc.timeout = 60*HZ;
	cgc.cmd[0] = GPCMD_SEND_OPC;
	cgc.cmd[1] = 1;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		pkt_dump_sense(pd, &cgc);
	return ret;
}

static int pkt_open_write(struct pktcdvd_device *pd)
{
	struct device *ddev = disk_to_dev(pd->disk);
	int ret;
	unsigned int write_speed, media_write_speed, read_speed;

	ret = pkt_probe_settings(pd);
	if (ret) {
		dev_dbg(ddev, "failed probe\n");
		return ret;
	}

	ret = pkt_set_write_settings(pd);
	if (ret) {
		dev_notice(ddev, "failed saving write settings\n");
		return -EIO;
	}

	pkt_write_caching(pd);

	ret = pkt_get_max_speed(pd, &write_speed);
	if (ret)
		write_speed = 16 * 177;
	switch (pd->mmc3_profile) {
	case 0x13: /* DVD-RW */
	case 0x1a: /* DVD+RW */
	case 0x12: /* DVD-RAM */
		dev_notice(ddev, "write speed %ukB/s\n", write_speed);
		break;
	default:
		ret = pkt_media_speed(pd, &media_write_speed);
		if (ret)
			media_write_speed = 16;
		write_speed = min(write_speed, media_write_speed * 177);
		dev_notice(ddev, "write speed %ux\n", write_speed / 176);
		break;
	}
	read_speed = write_speed;

	ret = pkt_set_speed(pd, write_speed, read_speed);
	if (ret) {
		dev_notice(ddev, "couldn't set write speed\n");
		return -EIO;
	}
	pd->write_speed = write_speed;
	pd->read_speed = read_speed;

	ret = pkt_perform_opc(pd);
	if (ret)
		dev_notice(ddev, "Optimum Power Calibration failed\n");

	return 0;
}
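
/*
 * Units in pkt_open_write(): drive speeds are handled in kB/s, where 1x
 * CD speed is 176.4 kB/s in raw MMC terms -- presumably the reason the
 * code multiplies "Nx" media speeds by 177 but divides by 176 for
 * display, so that rounding never overstates the speed. For example, a
 * 24x CD-RW medium behind a drive capped at 16 * 177 kB/s works out as
 *
 *	min(16 * 177, 24 * 177) = 2832 kB/s  ->  reported as 2832 / 176 = 16x
 *
 * (illustrative numbers only).
 */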

/*
 * called at open time.
 */
static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
{
	struct device *ddev = disk_to_dev(pd->disk);
	int ret;
	long lba;
	struct request_queue *q;
	struct block_device *bdev;

	/*
	 * We need to re-open the cdrom device without O_NONBLOCK to be able
	 * to read/write from/to it. It is already opened in O_NONBLOCK mode
	 * so open should not fail.
	 */
	bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd,
				 NULL);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto out;
	}

	ret = pkt_get_last_written(pd, &lba);
	if (ret) {
		dev_err(ddev, "pkt_get_last_written failed\n");
		goto out_putdev;
	}

	set_capacity(pd->disk, lba << 2);
	set_capacity_and_notify(pd->bdev->bd_disk, lba << 2);

	q = bdev_get_queue(pd->bdev);
	if (write) {
		ret = pkt_open_write(pd);
		if (ret)
			goto out_putdev;
		/*
		 * Some CDRW drives cannot handle writes larger than one
		 * packet, even if the size is a multiple of the packet size.
		 */
		blk_queue_max_hw_sectors(q, pd->settings.size);
		set_bit(PACKET_WRITABLE, &pd->flags);
	} else {
		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
		clear_bit(PACKET_WRITABLE, &pd->flags);
	}

	ret = pkt_set_segment_merging(pd, q);
	if (ret)
		goto out_putdev;

	if (write) {
		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
			dev_err(ddev, "not enough memory for buffers\n");
			ret = -ENOMEM;
			goto out_putdev;
		}
		dev_info(ddev, "%lukB available on disc\n", lba << 1);
	}

	return 0;

out_putdev:
	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	return ret;
}
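
/*
 * Unit bookkeeping for pkt_open_dev(): pkt_get_last_written() returns an
 * LBA in 2kB CD frames, while gendisk capacities are in 512-byte sectors
 * and the log line is in kB. Hence the two shifts above:
 *
 *	lba << 2  ->  sectors (2048 / 512 = 4 sectors per frame)
 *	lba << 1  ->  kB      (2 kB per frame)
 *
 * e.g. lba = 1000 frames gives a capacity of 4000 sectors and "2000kB
 * available on disc".
 */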

/*
 * called when the device is closed. makes sure that the device flushes
 * the internal cache before we close.
 */
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
	struct device *ddev = disk_to_dev(pd->disk);

	if (flush && pkt_flush_cache(pd))
		dev_notice(ddev, "not flushing cache\n");

	pkt_lock_door(pd, 0);

	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);

	pkt_shrink_pktlist(pd);
}

static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
	if (dev_minor >= MAX_WRITERS)
		return NULL;

	dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
	return pkt_devs[dev_minor];
}

static int pkt_open(struct block_device *bdev, fmode_t mode)
{
	struct pktcdvd_device *pd = NULL;
	int ret;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
	if (!pd) {
		ret = -ENODEV;
		goto out;
	}
	BUG_ON(pd->refcnt < 0);

	pd->refcnt++;
	if (pd->refcnt > 1) {
		if ((mode & FMODE_WRITE) &&
		    !test_bit(PACKET_WRITABLE, &pd->flags)) {
			ret = -EBUSY;
			goto out_dec;
		}
	} else {
		ret = pkt_open_dev(pd, mode & FMODE_WRITE);
		if (ret)
			goto out_dec;
		/*
		 * needed here as well, since ext2 (among others) may change
		 * the blocksize at mount time
		 */
		set_blocksize(bdev, CD_FRAMESIZE);
	}

	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return 0;

out_dec:
	pd->refcnt--;
out:
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return ret;
}

static void pkt_close(struct gendisk *disk, fmode_t mode)
{
	struct pktcdvd_device *pd = disk->private_data;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd->refcnt--;
	BUG_ON(pd->refcnt < 0);
	if (pd->refcnt == 0) {
		int flush = test_bit(PACKET_WRITABLE, &pd->flags);
		pkt_release_dev(pd, flush);
	}
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
}


static void pkt_end_io_read_cloned(struct bio *bio)
{
	struct packet_stacked_data *psd = bio->bi_private;
	struct pktcdvd_device *pd = psd->pd;

	psd->bio->bi_status = bio->bi_status;
	bio_put(bio);
	bio_endio(psd->bio);
	mempool_free(psd, &psd_pool);
	pkt_bio_finished(pd);
}

static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
	struct bio *cloned_bio =
		bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set);
	struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);

	psd->pd = pd;
	psd->bio = bio;
	cloned_bio->bi_private = psd;
	cloned_bio->bi_end_io = pkt_end_io_read_cloned;
	pd->stats.secs_r += bio_sectors(bio);
	pkt_queue_bio(pd, cloned_bio);
}

static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
{
	struct pktcdvd_device *pd = q->queuedata;
	sector_t zone;
	struct packet_data *pkt;
	int was_empty, blocked_bio;
	struct pkt_rb_node *node;

	zone = get_zone(bio->bi_iter.bi_sector, pd);

	/*
	 * If we find a matching packet in state WAITING or READ_WAIT, we can
	 * just append this bio to that packet.
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	blocked_bio = 0;
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (pkt->sector == zone) {
			spin_lock(&pkt->lock);
			if ((pkt->state == PACKET_WAITING_STATE) ||
			    (pkt->state == PACKET_READ_WAIT_STATE)) {
				bio_list_add(&pkt->orig_bios, bio);
				pkt->write_size +=
					bio->bi_iter.bi_size / CD_FRAMESIZE;
				if ((pkt->write_size >= pkt->frames) &&
				    (pkt->state == PACKET_WAITING_STATE)) {
					atomic_inc(&pkt->run_sm);
					wake_up(&pd->wqueue);
				}
				spin_unlock(&pkt->lock);
				spin_unlock(&pd->cdrw.active_list_lock);
				return;
			} else {
				blocked_bio = 1;
			}
			spin_unlock(&pkt->lock);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);

	/*
	 * Test if there is enough room left in the bio work queue
	 * (queue size >= congestion on mark).
	 * If not, wait until the work queue size is below the congestion
	 * off mark.
	 */
	spin_lock(&pd->lock);
	if (pd->write_congestion_on > 0
	    && pd->bio_queue_size >= pd->write_congestion_on) {
		struct wait_bit_queue_entry wqe;

		init_wait_var_entry(&wqe, &pd->congested, 0);
		for (;;) {
			prepare_to_wait_event(__var_waitqueue(&pd->congested),
					      &wqe.wq_entry,
					      TASK_UNINTERRUPTIBLE);
			if (pd->bio_queue_size <= pd->write_congestion_off)
				break;
			pd->congested = true;
			spin_unlock(&pd->lock);
			schedule();
			spin_lock(&pd->lock);
		}
	}
	spin_unlock(&pd->lock);

	/*
	 * No matching packet found. Store the bio in the work queue.
	 */
	node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
	node->bio = bio;
	spin_lock(&pd->lock);
	BUG_ON(pd->bio_queue_size < 0);
	was_empty = (pd->bio_queue_size == 0);
	pkt_rbtree_insert(pd, node);
	spin_unlock(&pd->lock);

	/*
	 * Wake up the worker thread.
	 */
	atomic_set(&pd->scan_queue, 1);
	if (was_empty) {
		/* This wake_up is required for correct operation */
		wake_up(&pd->wqueue);
	} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
		/*
		 * This wake up is not required for correct operation,
		 * but improves performance in some cases.
		 */
		wake_up(&pd->wqueue);
	}
}
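
/*
 * The congestion wait above implements simple hysteresis: once the
 * number of queued write bios reaches pd->write_congestion_on, the
 * submitter sleeps until the worker thread has drained the queue down
 * to pd->write_congestion_off. The gap between the two marks keeps
 * submitters from thrashing right at the threshold -- e.g. with
 * on = 1000 and off = 100 (illustrative numbers), a writer that hits
 * the high mark stays blocked until fewer than 100 bios remain queued.
 */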

static void pkt_submit_bio(struct bio *bio)
{
	struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata;
	struct device *ddev = disk_to_dev(pd->disk);
	struct bio *split;

	bio = bio_split_to_limits(bio);
	if (!bio)
		return;

	dev_dbg(ddev, "start = %6llx stop = %6llx\n",
		(unsigned long long)bio->bi_iter.bi_sector,
		(unsigned long long)bio_end_sector(bio));

	/*
	 * Clone READ bios so we can have our own bi_end_io callback.
	 */
	if (bio_data_dir(bio) == READ) {
		pkt_make_request_read(pd, bio);
		return;
	}

	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
		dev_notice(ddev, "WRITE for ro device (%llu)\n",
			   (unsigned long long)bio->bi_iter.bi_sector);
		goto end_io;
	}

	if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
		dev_err(ddev, "wrong bio size\n");
		goto end_io;
	}

	do {
		sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
		sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);

		if (last_zone != zone) {
			BUG_ON(last_zone != zone + pd->settings.size);

			split = bio_split(bio, last_zone -
					  bio->bi_iter.bi_sector,
					  GFP_NOIO, &pkt_bio_set);
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
	} while (split != bio);

	return;
end_io:
	bio_io_error(bio);
}
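
/*
 * Worked example of the zone-splitting loop above (illustrative numbers,
 * assuming a zero pd->offset): with a packet size of 128 sectors, a
 * 64-sector write starting at sector 100 touches two zones, since
 *
 *	zone      = get_zone(100) = 0
 *	last_zone = get_zone(163) = 128
 *
 * The bio is therefore split at last_zone - 100 = 28 sectors; the first
 * 28 sectors are queued for zone 0 and the chained remainder (sectors
 * 128..163) for zone 128. The BUG_ON documents the invariant that,
 * because max_hw_sectors is capped at one packet, a bio can straddle at
 * most two adjacent zones.
 */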

static void pkt_init_queue(struct pktcdvd_device *pd)
{
	struct request_queue *q = pd->disk->queue;

	blk_queue_logical_block_size(q, CD_FRAMESIZE);
	blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
	q->queuedata = pd;
}

static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
	struct device *ddev = disk_to_dev(pd->disk);
	int i;
	struct block_device *bdev;
	struct scsi_device *sdev;

	if (pd->pkt_dev == dev) {
		dev_err(ddev, "recursive setup not allowed\n");
		return -EBUSY;
	}
	for (i = 0; i < MAX_WRITERS; i++) {
		struct pktcdvd_device *pd2 = pkt_devs[i];
		if (!pd2)
			continue;
		if (pd2->bdev->bd_dev == dev) {
			dev_err(ddev, "%pg already setup\n", pd2->bdev);
			return -EBUSY;
		}
		if (pd2->pkt_dev == dev) {
			dev_err(ddev, "can't chain pktcdvd devices\n");
			return -EBUSY;
		}
	}

	bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL, NULL);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	sdev = scsi_device_from_queue(bdev->bd_disk->queue);
	if (!sdev) {
		blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
		return -EINVAL;
	}
	put_device(&sdev->sdev_gendev);

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	pd->bdev = bdev;
	set_blocksize(bdev, CD_FRAMESIZE);

	pkt_init_queue(pd);

	atomic_set(&pd->cdrw.pending_bios, 0);
	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name);
	if (IS_ERR(pd->cdrw.thread)) {
		dev_err(ddev, "can't start kernel thread\n");
		goto out_mem;
	}

	proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd);
	dev_notice(ddev, "writer mapped to %pg\n", bdev);
	return 0;

out_mem:
	blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return -ENOMEM;
}

static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
	struct device *ddev = disk_to_dev(pd->disk);
	int ret;

	dev_dbg(ddev, "cmd %x, dev %d:%d\n", cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));

	mutex_lock(&pktcdvd_mutex);
	switch (cmd) {
	case CDROMEJECT:
		/*
		 * The door gets locked when the device is opened, so we
		 * have to unlock it or else the eject command fails.
		 */
		if (pd->refcnt == 1)
			pkt_lock_door(pd, 0);
		fallthrough;
	/*
	 * forward selected CDROM ioctls to CD-ROM, for UDF
	 */
	case CDROMMULTISESSION:
	case CDROMREADTOCENTRY:
	case CDROM_LAST_WRITTEN:
	case CDROM_SEND_PACKET:
	case SCSI_IOCTL_SEND_COMMAND:
		if (!bdev->bd_disk->fops->ioctl)
			ret = -ENOTTY;
		else
			ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
		break;
	default:
		dev_dbg(ddev, "Unknown ioctl (%x)\n", cmd);
		ret = -ENOTTY;
	}
	mutex_unlock(&pktcdvd_mutex);

	return ret;
}

static unsigned int pkt_check_events(struct gendisk *disk,
				     unsigned int clearing)
{
	struct pktcdvd_device *pd = disk->private_data;
	struct gendisk *attached_disk;

	if (!pd)
		return 0;
	if (!pd->bdev)
		return 0;
	attached_disk = pd->bdev->bd_disk;
	if (!attached_disk || !attached_disk->fops->check_events)
		return 0;
	return attached_disk->fops->check_events(attached_disk, clearing);
}

static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
}

static const struct block_device_operations pktcdvd_ops = {
	.owner		= THIS_MODULE,
	.submit_bio	= pkt_submit_bio,
	.open		= pkt_open,
	.release	= pkt_close,
	.ioctl		= pkt_ioctl,
	.compat_ioctl	= blkdev_compat_ptr_ioctl,
	.check_events	= pkt_check_events,
	.devnode	= pkt_devnode,
};
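
/*
 * Because pkt_devnode() prefixes the disk name with "pktcdvd/", and
 * pkt_setup_dev() below names disks DRIVER_NAME"%d", devtmpfs/udev end
 * up creating nodes like /dev/pktcdvd/pktcdvd0 rather than flat /dev
 * entries; the control node registered further down similarly appears
 * as /dev/pktcdvd/control.
 */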

/*
 * Set up mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_setup_dev(dev_t dev, dev_t *pkt_dev)
{
	int idx;
	int ret = -ENOMEM;
	struct pktcdvd_device *pd;
	struct gendisk *disk;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++)
		if (!pkt_devs[idx])
			break;
	if (idx == MAX_WRITERS) {
		pr_err("max %d writers supported\n", MAX_WRITERS);
		ret = -EBUSY;
		goto out_mutex;
	}

	pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
	if (!pd)
		goto out_mutex;

	ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
					sizeof(struct pkt_rb_node));
	if (ret)
		goto out_mem;

	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
	spin_lock_init(&pd->cdrw.active_list_lock);

	spin_lock_init(&pd->lock);
	spin_lock_init(&pd->iosched.lock);
	bio_list_init(&pd->iosched.read_queue);
	bio_list_init(&pd->iosched.write_queue);
	init_waitqueue_head(&pd->wqueue);
	pd->bio_queue = RB_ROOT;

	pd->write_congestion_on = write_congestion_on;
	pd->write_congestion_off = write_congestion_off;

	ret = -ENOMEM;
	disk = blk_alloc_disk(NUMA_NO_NODE);
	if (!disk)
		goto out_mem;
	pd->disk = disk;
	disk->major = pktdev_major;
	disk->first_minor = idx;
	disk->minors = 1;
	disk->fops = &pktcdvd_ops;
	disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
	snprintf(disk->disk_name, sizeof(disk->disk_name), DRIVER_NAME"%d", idx);
	disk->private_data = pd;

	pd->pkt_dev = MKDEV(pktdev_major, idx);
	ret = pkt_new_dev(pd, dev);
	if (ret)
		goto out_mem2;

	/* inherit events of the host device */
	disk->events = pd->bdev->bd_disk->events;

	ret = add_disk(disk);
	if (ret)
		goto out_mem2;

	pkt_sysfs_dev_new(pd);
	pkt_debugfs_dev_new(pd);

	pkt_devs[idx] = pd;
	if (pkt_dev)
		*pkt_dev = pd->pkt_dev;

	mutex_unlock(&ctl_mutex);
	return 0;

out_mem2:
	put_disk(disk);
out_mem:
	mempool_exit(&pd->rb_pool);
	kfree(pd);
out_mutex:
	mutex_unlock(&ctl_mutex);
	pr_err("setup of pktcdvd device failed\n");
	return ret;
}

/*
 * Tear down mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_remove_dev(dev_t pkt_dev)
{
	struct pktcdvd_device *pd;
	struct device *ddev;
	int idx;
	int ret = 0;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++) {
		pd = pkt_devs[idx];
		if (pd && (pd->pkt_dev == pkt_dev))
			break;
	}
	if (idx == MAX_WRITERS) {
		pr_debug("dev not setup\n");
		ret = -ENXIO;
		goto out;
	}

	if (pd->refcnt > 0) {
		ret = -EBUSY;
		goto out;
	}

	ddev = disk_to_dev(pd->disk);

	if (!IS_ERR(pd->cdrw.thread))
		kthread_stop(pd->cdrw.thread);

	pkt_devs[idx] = NULL;

	pkt_debugfs_dev_remove(pd);
	pkt_sysfs_dev_remove(pd);

	blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);

	remove_proc_entry(pd->disk->disk_name, pkt_proc);
	dev_notice(ddev, "writer unmapped\n");

	del_gendisk(pd->disk);
	put_disk(pd->disk);

	mempool_exit(&pd->rb_pool);
	kfree(pd);

	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);

out:
	mutex_unlock(&ctl_mutex);
	return ret;
}

static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
{
	struct pktcdvd_device *pd;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
	if (pd) {
		ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
		ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
	} else {
		ctrl_cmd->dev = 0;
		ctrl_cmd->pkt_dev = 0;
	}
	ctrl_cmd->num_devices = MAX_WRITERS;

	mutex_unlock(&ctl_mutex);
}
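
/*
 * For reference, a user-space mapping is created through the misc
 * control node rather than through the block device itself. A minimal
 * sketch of the setup call, assuming the pkt_ctrl_command layout from
 * <linux/pktcdvd.h> (error handling omitted, illustrative only):
 *
 *	struct pkt_ctrl_command c = {
 *		.command = PKT_CTRL_CMD_SETUP,
 *		.dev = dev,		-- encoded dev_t of e.g. /dev/sr0
 *	};
 *	int fd = open("/dev/pktcdvd/control", O_RDONLY);
 *	ioctl(fd, PACKET_CTRL_CMD, &c);	-- on success, c.pkt_dev names
 *					-- the new pktcdvd node
 *
 * PKT_CTRL_CMD_TEARDOWN and PKT_CTRL_CMD_STATUS follow the same
 * pattern, as handled by pkt_ctl_ioctl() below.
 */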

static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct pkt_ctrl_command ctrl_cmd;
	int ret = 0;
	dev_t pkt_dev = 0;

	if (cmd != PACKET_CTRL_CMD)
		return -ENOTTY;

	if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;

	switch (ctrl_cmd.command) {
	case PKT_CTRL_CMD_SETUP:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
		ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
		break;
	case PKT_CTRL_CMD_TEARDOWN:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
		break;
	case PKT_CTRL_CMD_STATUS:
		pkt_get_status(&ctrl_cmd);
		break;
	default:
		return -ENOTTY;
	}

	if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;
	return ret;
}

#ifdef CONFIG_COMPAT
static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations pkt_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= pkt_ctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= pkt_ctl_compat_ioctl,
#endif
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
};

static struct miscdevice pkt_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= DRIVER_NAME,
	.nodename	= "pktcdvd/control",
	.fops		= &pkt_ctl_fops
};

static int __init pkt_init(void)
{
	int ret;

	mutex_init(&ctl_mutex);

	ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
					sizeof(struct packet_stacked_data));
	if (ret)
		return ret;
	ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
	if (ret) {
		mempool_exit(&psd_pool);
		return ret;
	}

	ret = register_blkdev(pktdev_major, DRIVER_NAME);
	if (ret < 0) {
		pr_err("unable to register block device\n");
		goto out2;
	}
	if (!pktdev_major)
		pktdev_major = ret;

	ret = pkt_sysfs_init();
	if (ret)
		goto out;

	pkt_debugfs_init();

	ret = misc_register(&pkt_misc);
	if (ret) {
		pr_err("unable to register misc device\n");
		goto out_misc;
	}

	pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);

	return 0;

out_misc:
	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();
out:
	unregister_blkdev(pktdev_major, DRIVER_NAME);
out2:
	mempool_exit(&psd_pool);
	bioset_exit(&pkt_bio_set);
	return ret;
}
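
/*
 * Note on the major number handling in pkt_init(): pktdev_major starts
 * out as 0, and register_blkdev() treats a major of 0 as a request for
 * a dynamically allocated one, returning the allocated value -- hence
 * the "if (!pktdev_major) pktdev_major = ret;" above. Minors
 * 0..MAX_WRITERS-1 under that major are then handed out by
 * pkt_setup_dev().
 */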

static void __exit pkt_exit(void)
{
	remove_proc_entry("driver/"DRIVER_NAME, NULL);
	misc_deregister(&pkt_misc);

	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();

	unregister_blkdev(pktdev_major, DRIVER_NAME);
	mempool_exit(&psd_pool);
	bioset_exit(&pkt_bio_set);
}

MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
MODULE_LICENSE("GPL");

module_init(pkt_init);
module_exit(pkt_exit);